The macros slub_get_cpu_ptr()/slub_put_cpu_ptr() are now unused, remove
them.

USE_LOCKLESS_FAST_PATH() has lost its true meaning with the code being
removed. The only remaining usage is in fact testing whether we can
assert irqs disabled, because spin_lock_irqsave() only does that on
!RT. Test for CONFIG_PREEMPT_RT instead.

Reviewed-by: Suren Baghdasaryan
Signed-off-by: Vlastimil Babka
---
 mm/slub.c | 24 +-----------------------
 1 file changed, 1 insertion(+), 23 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index bb72cfa2d7ec..d52de6e3c2d5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -201,28 +201,6 @@ enum slab_flags {
 	SL_pfmemalloc = PG_active,	/* Historical reasons for this bit */
 };
 
-/*
- * We could simply use migrate_disable()/enable() but as long as it's a
- * function call even on !PREEMPT_RT, use inline preempt_disable() there.
- */
-#ifndef CONFIG_PREEMPT_RT
-#define slub_get_cpu_ptr(var)	get_cpu_ptr(var)
-#define slub_put_cpu_ptr(var)	put_cpu_ptr(var)
-#define USE_LOCKLESS_FAST_PATH()	(true)
-#else
-#define slub_get_cpu_ptr(var)		\
-({					\
-	migrate_disable();		\
-	this_cpu_ptr(var);		\
-})
-#define slub_put_cpu_ptr(var)		\
-do {					\
-	(void)(var);			\
-	migrate_enable();		\
-} while (0)
-#define USE_LOCKLESS_FAST_PATH()	(false)
-#endif
-
 #ifndef CONFIG_SLUB_TINY
 #define __fastpath_inline __always_inline
 #else
@@ -719,7 +697,7 @@ static inline bool __slab_update_freelist(struct kmem_cache *s, struct slab *sla
 {
 	bool ret;
 
-	if (USE_LOCKLESS_FAST_PATH())
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		lockdep_assert_irqs_disabled();
 
 	if (s->flags & __CMPXCHG_DOUBLE)
-- 
2.52.0
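
For readers unfamiliar with the IS_ENABLED() idiom the patch switches to:
unlike an #ifdef, IS_ENABLED(CONFIG_PREEMPT_RT) is an ordinary compile-time
constant expression, so both branches stay visible to the compiler and the
dead one is type-checked and then eliminated. Below is a minimal userspace
sketch of the resulting pattern, not kernel code; every *_sim name is an
invented stand-in (the real IS_ENABLED() lives in <linux/kconfig.h>, and the
real assertion is lockdep's lockdep_assert_irqs_disabled()).

#include <stdio.h>
#include <assert.h>

/* Simulated Kconfig switch: set to 1 to model a PREEMPT_RT kernel. */
#define CONFIG_PREEMPT_RT_SIM	0

static int irqs_disabled_sim;	/* stand-in for the real irq state */

static void lockdep_assert_irqs_disabled_sim(void)
{
	assert(irqs_disabled_sim);
}

static void slab_update_freelist_sim(void)
{
	/*
	 * Mirrors the patched check: on !RT, spin_lock_irqsave() in the
	 * callers really disables interrupts, so asserting is valid.
	 * On RT, spinlocks are sleeping locks and irqs stay enabled, so
	 * the assertion must be compiled out.
	 */
	if (!CONFIG_PREEMPT_RT_SIM)
		lockdep_assert_irqs_disabled_sim();

	printf("freelist update would proceed here\n");
}

int main(void)
{
	irqs_disabled_sim = 1;	/* as if spin_lock_irqsave() ran on !RT */
	slab_update_freelist_sim();
	return 0;
}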