From: Kairui Song

The cgroup v1 swap helpers always operate on swap cache folios whose
swap entry is stable: the folio is locked and in the swap cache. There
is no need to pass the swap entry or page count as separate parameters
when they can be derived from the folio itself. Simplify the redundant
parameters and add sanity checks to document the required
preconditions.

Also rename memcg1_swapout to __memcg1_swapout to indicate it requires
special calling context: the folio must be isolated and dying, and the
call must be made with interrupts disabled.

No functional change.

Acked-by: Chris Li
Signed-off-by: Kairui Song
---
 include/linux/memcontrol.h |  8 ++++----
 include/linux/swap.h       | 10 ++++------
 mm/huge_memory.c           |  2 +-
 mm/memcontrol-v1.c         | 33 ++++++++++++++++++++-------------
 mm/memcontrol.c            |  9 ++++-----
 mm/swap_state.c            |  4 ++--
 mm/swapfile.c              |  2 +-
 mm/vmscan.c                |  2 +-
 8 files changed, 37 insertions(+), 33 deletions(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index dc3fa687759b..7d08128de1fd 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1899,8 +1899,8 @@ static inline void mem_cgroup_exit_user_fault(void)
 	current->in_user_fault = 0;
 }
 
-void memcg1_swapout(struct folio *folio, swp_entry_t entry);
-void memcg1_swapin(swp_entry_t entry, unsigned int nr_pages);
+void __memcg1_swapout(struct folio *folio);
+void memcg1_swapin(struct folio *folio);
 
 #else /* CONFIG_MEMCG_V1 */
 
 static inline
@@ -1929,11 +1929,11 @@ static inline void mem_cgroup_exit_user_fault(void)
 {
 }
 
-static inline void memcg1_swapout(struct folio *folio, swp_entry_t entry)
+static inline void __memcg1_swapout(struct folio *folio)
 {
 }
 
-static inline void memcg1_swapin(swp_entry_t entry, unsigned int nr_pages)
+static inline void memcg1_swapin(struct folio *folio)
 {
 }
diff --git a/include/linux/swap.h b/include/linux/swap.h
index aa89e1d30a77..6b3acdf9bdd4 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -576,13 +576,12 @@ static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
 #endif
 
 #if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP)
-int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry);
-static inline int mem_cgroup_try_charge_swap(struct folio *folio,
-		swp_entry_t entry)
+int __mem_cgroup_try_charge_swap(struct folio *folio);
+static inline int mem_cgroup_try_charge_swap(struct folio *folio)
 {
 	if (mem_cgroup_disabled())
 		return 0;
-	return __mem_cgroup_try_charge_swap(folio, entry);
+	return __mem_cgroup_try_charge_swap(folio);
 }
 
 extern void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
@@ -596,8 +595,7 @@ static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_p
 extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
 extern bool mem_cgroup_swap_full(struct folio *folio);
 #else
-static inline int mem_cgroup_try_charge_swap(struct folio *folio,
-		swp_entry_t entry)
+static inline int mem_cgroup_try_charge_swap(struct folio *folio)
 {
 	return 0;
 }
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c565b2a651e0..42b86e8ab7c0 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -4430,7 +4430,7 @@ void deferred_split_folio(struct folio *folio, bool partially_mapped)
 	/*
 	 * Exclude swapcache: originally to avoid a corrupt deferred split
-	 * queue. Nowadays that is fully prevented by memcg1_swapout();
+	 * queue. Nowadays that is fully prevented by __memcg1_swapout();
 	 * but if page reclaim is already handling the same folio, it is
 	 * unnecessary to handle it again in the shrinker, so excluding
 	 * swapcache here may still be a useful optimization.
diff --git a/mm/memcontrol-v1.c b/mm/memcontrol-v1.c
index 433bba9dfe71..36c507d81dc5 100644
--- a/mm/memcontrol-v1.c
+++ b/mm/memcontrol-v1.c
@@ -604,18 +604,23 @@ void memcg1_commit_charge(struct folio *folio, struct mem_cgroup *memcg)
 }
 
 /**
- * memcg1_swapout - transfer a memsw charge to swap
+ * __memcg1_swapout - transfer a memsw charge to swap
  * @folio: folio whose memsw charge to transfer
- * @entry: swap entry to move the charge to
  *
- * Transfer the memsw charge of @folio to @entry.
+ * Transfer the memsw charge of @folio to the swap entry stored in
+ * folio->swap.
+ *
+ * Context: folio must be isolated, unmapped, locked and is just about
+ * to be freed, and caller must disable IRQs.
  */
-void memcg1_swapout(struct folio *folio, swp_entry_t entry)
+void __memcg1_swapout(struct folio *folio)
 {
 	struct mem_cgroup *memcg, *swap_memcg;
 	struct obj_cgroup *objcg;
 	unsigned int nr_entries;
 
+	VM_WARN_ON_ONCE_FOLIO(!folio_test_swapcache(folio), folio);
+	VM_WARN_ON_ONCE_FOLIO(!folio_test_locked(folio), folio);
 	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
 	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
@@ -641,7 +646,7 @@ void memcg1_swapout(struct folio *folio, swp_entry_t entry)
 	swap_memcg = mem_cgroup_private_id_get_online(memcg, nr_entries);
 	mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
 
-	swap_cgroup_record(folio, mem_cgroup_private_id(swap_memcg), entry);
+	swap_cgroup_record(folio, mem_cgroup_private_id(swap_memcg), folio->swap);
 
 	folio_unqueue_deferred_split(folio);
 	folio->memcg_data = 0;
@@ -671,18 +676,20 @@ void memcg1_swapout(struct folio *folio, swp_entry_t entry)
 	obj_cgroup_put(objcg);
 }
 
-/*
+/**
  * memcg1_swapin - uncharge swap slot
- * @entry: the first swap entry for which the pages are charged
- * @nr_pages: number of pages which will be uncharged
+ * @folio: folio being swapped in
  *
- * Call this function after successfully adding the charged page to swapcache.
+ * Call this function after successfully adding the charged
+ * folio to swapcache.
  *
- * Note: This function assumes the page for which swap slot is being uncharged
- * is order 0 page.
+ * Context: The folio has to be in swap cache and locked.
  */
-void memcg1_swapin(swp_entry_t entry, unsigned int nr_pages)
+void memcg1_swapin(struct folio *folio)
 {
+	VM_WARN_ON_ONCE_FOLIO(!folio_test_swapcache(folio), folio);
+	VM_WARN_ON_ONCE_FOLIO(!folio_test_locked(folio), folio);
+
 	/*
 	 * Cgroup1's unified memory+swap counter has been charged with the
 	 * new swapcache page, finish the transfer by uncharging the swap
@@ -701,7 +708,7 @@
 	 * let's not wait for it. The page already received a
 	 * memory+swap charge, drop the swap entry duplicate.
 	 */
-	mem_cgroup_uncharge_swap(entry, nr_pages);
+	mem_cgroup_uncharge_swap(folio->swap, folio_nr_pages(folio));
 	}
 }
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d978e18b9b2d..a28a68eed7ba 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5464,13 +5464,12 @@ int __init mem_cgroup_init(void)
 /**
  * __mem_cgroup_try_charge_swap - try charging swap space for a folio
  * @folio: folio being added to swap
- * @entry: swap entry to charge
  *
- * Try to charge @folio's memcg for the swap space at @entry.
+ * Try to charge @folio's memcg for the swap space at folio->swap.
  *
  * Returns 0 on success, -ENOMEM on failure.
  */
-int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
+int __mem_cgroup_try_charge_swap(struct folio *folio)
 {
 	unsigned int nr_pages = folio_nr_pages(folio);
 	struct page_counter *counter;
@@ -5487,7 +5486,7 @@ int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
 	rcu_read_lock();
 	memcg = obj_cgroup_memcg(objcg);
 
-	if (!entry.val) {
+	if (!folio_test_swapcache(folio)) {
 		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
 		rcu_read_unlock();
 		return 0;
@@ -5506,7 +5505,7 @@
 	}
 
 	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
-	swap_cgroup_record(folio, mem_cgroup_private_id(memcg), entry);
+	swap_cgroup_record(folio, mem_cgroup_private_id(memcg), folio->swap);
 
 	return 0;
 }
diff --git a/mm/swap_state.c b/mm/swap_state.c
index f177c4b3ea7a..cdb7859eb502 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -455,8 +455,8 @@ static struct folio *__swap_cache_alloc(struct swap_cluster_info *ci,
 		return ERR_PTR(-ENOMEM);
 	}
 
-	/* For memsw accounting, swap is uncharged when folio is added to swap cache */
-	memcg1_swapin(entry, 1 << order);
+	/* memsw uncharges swap when folio is added to swap cache */
+	memcg1_swapin(folio);
 
 	if (shadow)
 		workingset_refault(folio, shadow);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 4e5a54769e81..5c8bb15719bf 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1731,7 +1731,7 @@ int folio_alloc_swap(struct folio *folio)
 	}
 
 	/* Need to call this even if allocation failed, for MEMCG_SWAP_FAIL. */
-	if (unlikely(mem_cgroup_try_charge_swap(folio, folio->swap)))
+	if (unlikely(mem_cgroup_try_charge_swap(folio)))
 		swap_cache_del_folio(folio);
 
 	if (unlikely(!folio_test_swapcache(folio)))
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b3e555561417..924c84326551 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -737,7 +737,7 @@ static int __remove_mapping(struct address_space *mapping, struct folio *folio,
 		if (reclaimed && !mapping_exiting(mapping))
 			shadow = workingset_eviction(folio, target_memcg);
-		memcg1_swapout(folio, swap);
+		__memcg1_swapout(folio);
 		__swap_cache_del_folio(ci, folio, swap, shadow);
 		swap_cluster_unlock_irq(ci);
 	} else {
-- 
2.54.0
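[Editor's note, not part of the patch: a minimal sketch of the invariant the
cleanup relies on. For a folio that is locked and in the swap cache, the
parameters the old helpers took explicitly are derivable from the folio
itself, using only accessors that already appear in the diff above;
example_swap_args() is a hypothetical name used purely for illustration.]

	/*
	 * Illustration only: how the dropped "entry" and "nr_pages"
	 * arguments are recovered from a locked swap cache folio.
	 */
	static void example_swap_args(struct folio *folio)
	{
		swp_entry_t entry = folio->swap;		/* was the explicit "entry" argument */
		unsigned int nr = folio_nr_pages(folio);	/* was "nr_pages" / "1 << order" */

		/* The reworked helpers assert these preconditions instead of trusting callers. */
		VM_WARN_ON_ONCE_FOLIO(!folio_test_swapcache(folio), folio);
		VM_WARN_ON_ONCE_FOLIO(!folio_test_locked(folio), folio);

		mem_cgroup_uncharge_swap(entry, nr);
	}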