group_mask_cpus_evenly() allows the caller to pass in a CPU mask that
should be evenly distributed. This new function is a more generic
version of the existing group_cpus_evenly(), which always distributes
all present CPUs into groups.

Reviewed-by: Hannes Reinecke
Signed-off-by: Daniel Wagner
---
 include/linux/group_cpus.h |  3 +++
 lib/group_cpus.c           | 59 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 62 insertions(+)

diff --git a/include/linux/group_cpus.h b/include/linux/group_cpus.h
index 9d4e5ab6c314b31c09fda82c3f6ac18f77e9de36..defab4123a82fa37cb2a9920029be8e3e121ca0d 100644
--- a/include/linux/group_cpus.h
+++ b/include/linux/group_cpus.h
@@ -10,5 +10,8 @@
 #include <linux/cpu.h>
 
 struct cpumask *group_cpus_evenly(unsigned int numgrps, unsigned int *nummasks);
+struct cpumask *group_mask_cpus_evenly(unsigned int numgrps,
+				       const struct cpumask *mask,
+				       unsigned int *nummasks);
 
 #endif
diff --git a/lib/group_cpus.c b/lib/group_cpus.c
index f254b232522d44c141cdc4e44e2c99a4148c08d6..ec0852132266618f540c580422f254684129ce90 100644
--- a/lib/group_cpus.c
+++ b/lib/group_cpus.c
@@ -8,6 +8,7 @@
 #include <linux/cpu.h>
 #include <linux/sort.h>
 #include <linux/group_cpus.h>
+#include
 
 static void grp_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
 				unsigned int cpus_per_grp)
@@ -424,3 +425,61 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps, unsigned int *nummasks)
 	return masks;
 }
 EXPORT_SYMBOL_GPL(group_cpus_evenly);
+
+/**
+ * group_mask_cpus_evenly - Group all CPUs evenly per NUMA/CPU locality
+ * @numgrps: number of cpumasks to create
+ * @mask: CPUs to consider for the grouping
+ * @nummasks: number of initialized cpumasks
+ *
+ * Return: cpumask array if successful, NULL otherwise. Only the CPUs
+ * set in @mask are considered for the grouping. Each array element
+ * contains the CPUs assigned to that group. @nummasks contains the
+ * number of initialized masks, which can be less than @numgrps.
+ *
+ * Try to put CPUs that are close in terms of CPU and NUMA locality into
+ * the same group, and run two-stage grouping:
+ *	1) allocate present CPUs on these groups evenly first
+ *	2) allocate other possible CPUs on these groups evenly
+ *
+ * The resulting grouping guarantees that all CPUs in @mask are covered
+ * and that no CPU is assigned to multiple groups.
+ */
+struct cpumask *group_mask_cpus_evenly(unsigned int numgrps,
+				       const struct cpumask *mask,
+				       unsigned int *nummasks)
+{
+	cpumask_var_t *node_to_cpumask;
+	cpumask_var_t nmsk;
+	int ret = -ENOMEM;
+	struct cpumask *masks = NULL;
+
+	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
+		return NULL;
+
+	node_to_cpumask = alloc_node_to_cpumask();
+	if (!node_to_cpumask)
+		goto fail_nmsk;
+
+	masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL);
+	if (!masks)
+		goto fail_node_to_cpumask;
+
+	build_node_to_cpumask(node_to_cpumask);
+
+	ret = __group_cpus_evenly(0, numgrps, node_to_cpumask, mask, nmsk,
+				  masks);
+
+fail_node_to_cpumask:
+	free_node_to_cpumask(node_to_cpumask);
+
+fail_nmsk:
+	free_cpumask_var(nmsk);
+	if (ret < 0) {
+		kfree(masks);
+		return NULL;
+	}
+	*nummasks = ret;
+	return masks;
+}
+EXPORT_SYMBOL_GPL(group_mask_cpus_evenly);

-- 
2.51.0
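
For reference, a minimal caller sketch (not part of this patch) showing
how the new API might be used: it groups only the online CPUs into
numgrps sets and prints the result. The function name example_spread()
and the choice of cpu_online_mask are illustrative assumptions; the
error handling follows the Return contract documented above.

#include <linux/cpumask.h>
#include <linux/group_cpus.h>
#include <linux/printk.h>
#include <linux/slab.h>

static int example_spread(unsigned int numgrps)
{
	struct cpumask *masks;
	unsigned int nummasks, i;

	/* Only CPUs that are currently online are eligible here. */
	masks = group_mask_cpus_evenly(numgrps, cpu_online_mask, &nummasks);
	if (!masks)
		return -ENOMEM;

	/*
	 * nummasks may be less than numgrps, e.g. when fewer CPUs are
	 * set in the mask than groups were requested.
	 */
	for (i = 0; i < nummasks; i++)
		pr_info("group %u: %*pbl\n", i, cpumask_pr_args(&masks[i]));

	/* The array is one kcalloc() allocation; a single kfree() frees it. */
	kfree(masks);
	return 0;
}

Note that the caller frees only the returned array itself; the struct
cpumask elements are embedded in that allocation, matching how
group_cpus_evenly() callers already clean up.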