__mod_lruvec_state() is already safe against irqs, so there is no need for a
separate interface (i.e. mod_lruvec_state) that wraps calls to it with irq
disabling and re-enabling. Let's rename __mod_lruvec_state to
mod_lruvec_state.

Signed-off-by: Shakeel Butt
---
 include/linux/mm_inline.h |  2 +-
 include/linux/vmstat.h    | 18 +-----------------
 mm/memcontrol.c           |  8 ++++----
 mm/migrate.c              | 20 ++++++++++----------
 mm/vmscan.c               |  4 ++--
 5 files changed, 18 insertions(+), 34 deletions(-)

diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 795b255abf65..d7b963255012 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -44,7 +44,7 @@ static __always_inline void __update_lru_size(struct lruvec *lruvec,
         lockdep_assert_held(&lruvec->lru_lock);
         WARN_ON_ONCE(nr_pages != (int)nr_pages);
 
-        __mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
+        mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
         __mod_zone_page_state(&pgdat->node_zones[zid],
                                 NR_ZONE_LRU_BASE + lru, nr_pages);
 }
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 11a37aaa4dd9..4eb7753e6e5c 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -520,19 +520,9 @@ static inline const char *vm_event_name(enum vm_event_item item)
 
 #ifdef CONFIG_MEMCG
 
-void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
+void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
                         int val);
 
-static inline void mod_lruvec_state(struct lruvec *lruvec,
-                                    enum node_stat_item idx, int val)
-{
-        unsigned long flags;
-
-        local_irq_save(flags);
-        __mod_lruvec_state(lruvec, idx, val);
-        local_irq_restore(flags);
-}
-
 void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
                              int val);
 
@@ -554,12 +544,6 @@ static inline void mod_lruvec_page_state(struct page *page,
 
 #else
 
-static inline void __mod_lruvec_state(struct lruvec *lruvec,
-                                      enum node_stat_item idx, int val)
-{
-        mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
-}
-
 static inline void mod_lruvec_state(struct lruvec *lruvec,
                                     enum node_stat_item idx, int val)
 {
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 3a59d3ee92a7..c31074e5852b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -757,7 +757,7 @@ static void mod_memcg_lruvec_state(struct lruvec *lruvec,
 }
 
 /**
- * __mod_lruvec_state - update lruvec memory statistics
+ * mod_lruvec_state - update lruvec memory statistics
  * @lruvec: the lruvec
  * @idx: the stat item
  * @val: delta to add to the counter, can be negative
@@ -766,7 +766,7 @@ static void mod_memcg_lruvec_state(struct lruvec *lruvec,
  * function updates the all three counters that are affected by a
  * change of state at this level: per-node, per-cgroup, per-lruvec.
  */
-void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
+void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
                         int val)
 {
         /* Update node */
@@ -794,7 +794,7 @@ void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
         }
 
         lruvec = mem_cgroup_lruvec(memcg, pgdat);
-        __mod_lruvec_state(lruvec, idx, val);
+        mod_lruvec_state(lruvec, idx, val);
         rcu_read_unlock();
 }
 EXPORT_SYMBOL(__lruvec_stat_mod_folio);
@@ -818,7 +818,7 @@ void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
                 mod_node_page_state(pgdat, idx, val);
         } else {
                 lruvec = mem_cgroup_lruvec(memcg, pgdat);
-                __mod_lruvec_state(lruvec, idx, val);
+                mod_lruvec_state(lruvec, idx, val);
         }
         rcu_read_unlock();
 }
diff --git a/mm/migrate.c b/mm/migrate.c
index 567dfae4d9f8..be00c3c82f3a 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -675,27 +675,27 @@ static int __folio_migrate_mapping(struct address_space *mapping,
                 old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
                 new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
 
-                __mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
-                __mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
+                mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
+                mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
                 if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
-                        __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
-                        __mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
+                        mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
+                        mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
                         if (folio_test_pmd_mappable(folio)) {
-                                __mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr);
-                                __mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr);
+                                mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr);
+                                mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr);
                         }
                 }
 #ifdef CONFIG_SWAP
                 if (folio_test_swapcache(folio)) {
-                        __mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
-                        __mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
+                        mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
+                        mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
                 }
 #endif
                 if (dirty && mapping_can_writeback(mapping)) {
-                        __mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
+                        mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
                         __mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
-                        __mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
+                        mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
                         __mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
                 }
         }
diff --git a/mm/vmscan.c b/mm/vmscan.c
index ba760072830b..b3231bdde4e6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2019,7 +2019,7 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
         spin_lock_irq(&lruvec->lru_lock);
         move_folios_to_lru(lruvec, &folio_list);
 
-        __mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc),
+        mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc),
                            stat.nr_demoted);
         __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
         item = PGSTEAL_KSWAPD + reclaimer_offset(sc);
@@ -4745,7 +4745,7 @@ static int evict_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
                 reset_batch_size(walk);
         }
 
-        __mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc),
+        mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc),
                            stat.nr_demoted);
 
         item = PGSTEAL_KSWAPD + reclaimer_offset(sc);
-- 
2.47.3
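
The pattern the patch applies generalizes: once the inner update is itself
safe to call with interrupts enabled, a wrapper that merely brackets it with
local_irq_save()/local_irq_restore() adds per-call overhead without adding
safety, so callers can use the inner function directly and the wrapper can be
dropped. The sketch below illustrates the idea as a self-contained userspace
analogy, with a C11 atomic standing in for the irq-safe lruvec update; the
names toy_counter and counter_add are hypothetical and nothing here is kernel
API.

/*
 * Userspace sketch of the pattern applied above: when the inner update
 * is already safe against interrupting contexts, a save/disable wrapper
 * around it is pure overhead.  All names are hypothetical illustrations,
 * not kernel APIs.
 */
#include <stdatomic.h>
#include <stdio.h>

struct toy_counter {
        _Atomic long val;
};

/* Stands in for __mod_lruvec_state(): the update itself is atomic, so
 * it needs no help from its callers to be safe. */
static void counter_add(struct toy_counter *c, long delta)
{
        atomic_fetch_add_explicit(&c->val, delta, memory_order_relaxed);
}

/*
 * The removed mod_lruvec_state() wrapper was the moral equivalent of:
 *
 *        lock();              // local_irq_save(flags)
 *        counter_add(c, d);   // already safe on its own
 *        unlock();            // local_irq_restore(flags)
 *
 * i.e. protection that the callee no longer needs.
 */

int main(void)
{
        struct toy_counter c = { .val = 0 };

        counter_add(&c, 16);    /* callers use the safe update directly */
        counter_add(&c, -4);
        printf("%ld\n", atomic_load(&c.val));   /* prints 12 */
        return 0;
}

The same reasoning motivates the rename itself: with the wrapper gone, the
unprefixed name can go to the one remaining implementation, and the double
underscore prefix, which in kernel convention usually signals a variant whose
callers must handle locking or irq state themselves, no longer sends a
misleading signal.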