From: Alexei Starovoitov

Clarify comments regarding pfmemalloc and NUMA preferences when
___slab_alloc() is operating in !allow_spin mode.

Signed-off-by: Alexei Starovoitov
---
 mm/slub.c | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 83983de948f3..c995f3bec69d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4456,9 +4456,17 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	/*
 	 * same as above but node_match() being false already
 	 * implies node != NUMA_NO_NODE.
-	 * Reentrant slub cannot take locks necessary to
-	 * deactivate_slab, hence ignore node preference.
-	 * kmalloc_nolock() doesn't allow __GFP_THISNODE.
+	 *
+	 * We don't strictly honor pfmemalloc and NUMA preferences
+	 * when !allow_spin because:
+	 *
+	 * 1. Most kmalloc() users allocate objects on the local node,
+	 *    so kmalloc_nolock() tries not to interfere with them by
+	 *    deactivating the cpu slab.
+	 *
+	 * 2. Deactivating due to NUMA or pfmemalloc mismatch may cause
+	 *    unnecessary slab allocations even when n->partial list
+	 *    is not empty.
 	 */
 	if (!node_isset(node, slab_nodes) ||
 	    !allow_spin) {
@@ -4547,11 +4555,6 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		slab = slub_percpu_partial(c);
 		slub_set_percpu_partial(c, slab);
 
-		/*
-		 * Reentrant slub cannot take locks necessary for
-		 * __put_partials(), hence ignore node preference.
-		 * kmalloc_nolock() doesn't allow __GFP_THISNODE.
-		 */
 		if (likely(node_match(slab, node) &&
 			   pfmemalloc_match(slab, gfpflags)) ||
 		    !allow_spin) {
-- 
2.47.3
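
For readers outside mm/, below is a minimal userspace sketch of the
predicate the second hunk keeps. Everything in it (struct fake_slab,
can_reuse_slab, the simplified node_match()/pfmemalloc_match() stand-ins)
is hypothetical illustration, not the kernel's SLUB code: it only models
how a NUMA or pfmemalloc mismatch is tolerated when allow_spin is false,
i.e. on the kmalloc_nolock() path, where the reentrant allocator cannot
take the locks that deactivate_slab()/__put_partials() would require.

#include <stdbool.h>
#include <stdio.h>

#define NUMA_NO_NODE (-1)

/* Simplified stand-in for struct slab; not the kernel's layout. */
struct fake_slab {
	int node;		/* node the slab's pages came from */
	bool pfmemalloc;	/* pages came from emergency reserves */
};

/* NUMA_NO_NODE matches anything, otherwise nodes must agree. */
static bool fake_node_match(const struct fake_slab *slab, int node)
{
	return node == NUMA_NO_NODE || slab->node == node;
}

/* A pfmemalloc slab may only serve callers allowed to dip into reserves. */
static bool fake_pfmemalloc_match(const struct fake_slab *slab,
				  bool pfmemalloc_allowed)
{
	return pfmemalloc_allowed || !slab->pfmemalloc;
}

/*
 * Models the condition left in place by the second hunk: reuse the slab
 * when it matches, or unconditionally when !allow_spin, since the
 * reentrant path must not deactivate the cpu slab.
 */
static bool can_reuse_slab(const struct fake_slab *slab, int node,
			   bool pfmemalloc_allowed, bool allow_spin)
{
	return (fake_node_match(slab, node) &&
		fake_pfmemalloc_match(slab, pfmemalloc_allowed)) ||
	       !allow_spin;
}

int main(void)
{
	const struct fake_slab slab = { .node = 0, .pfmemalloc = false };

	/* Wrong node: the regular path deactivates, the nolock path reuses. */
	printf("allow_spin=1 node=1 -> reuse=%d\n",
	       can_reuse_slab(&slab, 1, false, true));	/* 0: deactivate */
	printf("allow_spin=0 node=1 -> reuse=%d\n",
	       can_reuse_slab(&slab, 1, false, false));	/* 1: tolerate */
	return 0;
}

Tolerating the mismatch instead of deactivating is what keeps the nolock
path from allocating fresh slabs while n->partial still has usable ones,
matching point 2 of the new comment.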