From: Daniel Wagner The core scheduler recently transitioned to compiling SMP data structures unconditionally to reduce code complexity - see commit cac5cefbade9 ("sched/smp: Make SMP unconditional"). In line with this effort to reduce dual-path maintenance, remove the #ifdef CONFIG_SMP guards and the dedicated !SMP fallback logic here. While the !SMP path provided a slightly simpler execution flow for uniprocessor kernels (avoiding SMP-specific overhead), maintaining these separate code paths adds unnecessary complexity and testing burden. Removing these guards simplifies the codebase by standardizing entirely on the SMP logic, which safely resolves to single-CPU operations on UP configurations. Signed-off-by: Daniel Wagner Reviewed-by: Martin K. Petersen Reviewed-by: Hannes Reinecke [atomlin: Updated commit message to clarify !SMP removal context] Signed-off-by: Aaron Tomlin --- lib/group_cpus.c | 20 -------------------- 1 file changed, 20 deletions(-) diff --git a/lib/group_cpus.c b/lib/group_cpus.c index e6e18d7a49bb..b8d54398f88a 100644 --- a/lib/group_cpus.c +++ b/lib/group_cpus.c @@ -9,8 +9,6 @@ #include #include -#ifdef CONFIG_SMP - static void grp_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk, unsigned int cpus_per_grp) { @@ -564,22 +562,4 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps, unsigned int *nummasks) *nummasks = min(nr_present + nr_others, numgrps); return masks; } -#else /* CONFIG_SMP */ -struct cpumask *group_cpus_evenly(unsigned int numgrps, unsigned int *nummasks) -{ - struct cpumask *masks; - - if (numgrps == 0) - return NULL; - - masks = kzalloc_objs(*masks, numgrps); - if (!masks) - return NULL; - - /* assign all CPUs(cpu 0) to the 1st group only */ - cpumask_copy(&masks[0], cpu_possible_mask); - *nummasks = 1; - return masks; -} -#endif /* CONFIG_SMP */ EXPORT_SYMBOL_GPL(group_cpus_evenly); -- 2.51.0