Currently, if a user enqueues a work item using schedule_delayed_work(), the
workqueue used is "system_wq" (a per-CPU wq), while queue_delayed_work() uses
WORK_CPU_UNBOUND (used when a CPU is not specified). The same applies to
schedule_work(), which uses system_wq, and queue_work(), which again uses
WORK_CPU_UNBOUND.

This lack of consistency cannot be addressed without refactoring the API.

system_unbound_wq should be the default workqueue so as not to enforce
locality constraints for random work whenever it is not required.

Add system_dfl_wq to encourage its use when unbound work should be used.

queue_work() / queue_delayed_work() / mod_delayed_work() will now use the new
unbound wq: if the user still uses the old wq, a warning will be printed
along with a redirect to the new wq.

The old system_unbound_wq will be kept for a few release cycles.

Suggested-by: Tejun Heo
Signed-off-by: Marco Crivellari
---
 mm/backing-dev.c | 2 +-
 mm/kfence/core.c | 6 +++---
 mm/memcontrol.c  | 4 ++--
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 783904d8c5ef..e9f9fdcfe052 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -934,7 +934,7 @@ void wb_memcg_offline(struct mem_cgroup *memcg)
 	memcg_cgwb_list->next = NULL;	/* prevent new wb's */
 	spin_unlock_irq(&cgwb_lock);
 
-	queue_work(system_unbound_wq, &cleanup_offline_cgwbs_work);
+	queue_work(system_dfl_wq, &cleanup_offline_cgwbs_work);
 }
 
 /**
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 102048821c22..f26d87d59296 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -854,7 +854,7 @@ static void toggle_allocation_gate(struct work_struct *work)
 	/* Disable static key and reset timer. */
 	static_branch_disable(&kfence_allocation_key);
 #endif
-	queue_delayed_work(system_unbound_wq, &kfence_timer,
+	queue_delayed_work(system_dfl_wq, &kfence_timer,
 			   msecs_to_jiffies(kfence_sample_interval));
 }
 
@@ -900,7 +900,7 @@ static void kfence_init_enable(void)
 		atomic_notifier_chain_register(&panic_notifier_list, &kfence_check_canary_notifier);
 
 	WRITE_ONCE(kfence_enabled, true);
-	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
+	queue_delayed_work(system_dfl_wq, &kfence_timer, 0);
 
 	pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
 		CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
@@ -996,7 +996,7 @@ static int kfence_enable_late(void)
 		return kfence_init_late();
 
 	WRITE_ONCE(kfence_enabled, true);
-	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
+	queue_delayed_work(system_dfl_wq, &kfence_timer, 0);
 	pr_info("re-enabled\n");
 	return 0;
 }
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 421740f1bcdc..c2944bc83378 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -651,7 +651,7 @@ static void flush_memcg_stats_dwork(struct work_struct *w)
 	 * in latency-sensitive paths is as cheap as possible.
 	 */
 	__mem_cgroup_flush_stats(root_mem_cgroup, true);
-	queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
+	queue_delayed_work(system_dfl_wq, &stats_flush_dwork, FLUSH_TIME);
 }
 
 unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
@@ -3732,7 +3732,7 @@ static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
 		goto offline_kmem;
 
 	if (unlikely(mem_cgroup_is_root(memcg)) && !mem_cgroup_disabled())
-		queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
+		queue_delayed_work(system_dfl_wq, &stats_flush_dwork,
 				   FLUSH_TIME);
 
 	lru_gen_online_memcg(memcg);
-- 
2.51.0
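
A minimal sketch of the conversion pattern the hunks above apply (not part of
the patch; example_fn, example_work and example_submit are hypothetical names
used only for illustration), showing locality-free work being queued on the
new default unbound workqueue:

#include <linux/workqueue.h>

/* Work callback with no per-CPU locality requirement (illustrative only). */
static void example_fn(struct work_struct *work)
{
	/* ... deferred processing ... */
}

static DECLARE_WORK(example_work, example_fn);

static void example_submit(void)
{
	/*
	 * Previously such call sites typically used:
	 *     queue_work(system_unbound_wq, &example_work);
	 * With this series, unbound work is queued on the new default wq:
	 */
	queue_work(system_dfl_wq, &example_work);
}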