When slab objects are freed with kfree_rcu() and not call_rcu(), using
struct rcu_head (16 bytes on 64-bit) is unnecessary and struct rcu_ptr
(8 bytes on 64-bit) is enough. Save one pointer per slab object by
using struct rcu_ptr.

Signed-off-by: Harry Yoo
---
 include/linux/list_lru.h | 2 +-
 include/linux/shrinker.h | 2 +-
 mm/vmalloc.c             | 4 ++--
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index fe739d35a864..c79bccb7dafa 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -37,7 +37,7 @@ struct list_lru_one {
 };
 
 struct list_lru_memcg {
-	struct rcu_head rcu;
+	struct rcu_ptr rcu;
 	/* array of per cgroup per node lists, indexed by node id */
 	struct list_lru_one node[];
 };
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 1a00be90d93a..bad20de2803a 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -19,7 +19,7 @@ struct shrinker_info_unit {
 };
 
 struct shrinker_info {
-	struct rcu_head rcu;
+	struct rcu_ptr rcu;
 	int map_nr_max;
 	struct shrinker_info_unit *unit[];
 };
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 41dd01e8430c..89c781dcab58 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2596,7 +2596,7 @@ struct vmap_block {
 	DECLARE_BITMAP(used_map, VMAP_BBMAP_BITS);
 	unsigned long dirty_min, dirty_max; /*< dirty range */
 	struct list_head free_list;
-	struct rcu_head rcu_head;
+	struct rcu_ptr rcu;
 	struct list_head purge;
 	unsigned int cpu;
 };
@@ -2765,7 +2765,7 @@ static void free_vmap_block(struct vmap_block *vb)
 	spin_unlock(&vn->busy.lock);
 
 	free_vmap_area_noflush(vb->va);
-	kfree_rcu(vb, rcu_head);
+	kfree_rcu(vb, rcu);
 }
 
 static bool purge_fragmented_block(struct vmap_block *vb,
-- 
2.43.0