From: Kairui Song

The newly introduced helper, which checks bad slots and emptiness of a
cluster, can cover the older sanity check just fine, with a more
rigorous condition check. So merge them.

Signed-off-by: Kairui Song
---
 mm/swapfile.c | 35 +++++++++--------------------------
 1 file changed, 9 insertions(+), 26 deletions(-)

diff --git a/mm/swapfile.c b/mm/swapfile.c
index bdce2abd9135..968153691fc4 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -459,9 +459,11 @@ static void swap_table_free(struct swap_table *table)
  * One special case is that bad slots can't be freed, so check the number of
  * bad slots for swapoff, and non-swapoff path must never free bad slots.
  */
-static void swap_cluster_assert_empty(struct swap_cluster_info *ci, bool swapoff)
+static void swap_cluster_assert_empty(struct swap_cluster_info *ci,
+				      unsigned int ci_off, unsigned int nr,
+				      bool swapoff)
 {
-	unsigned int ci_off = 0, ci_end = SWAPFILE_CLUSTER;
+	unsigned int ci_end = ci_off + nr;
 	unsigned long swp_tb;
 	int bad_slots = 0;
 
@@ -588,7 +590,7 @@ static void swap_cluster_schedule_discard(struct swap_info_struct *si,
 static void __free_cluster(struct swap_info_struct *si,
 			   struct swap_cluster_info *ci)
 {
-	swap_cluster_assert_empty(ci, false);
+	swap_cluster_assert_empty(ci, 0, SWAPFILE_CLUSTER, false);
 	swap_cluster_free_table(ci);
 	move_cluster(si, ci, &si->free_clusters, CLUSTER_FLAG_FREE);
 	ci->order = 0;
@@ -898,26 +900,6 @@ static bool cluster_scan_range(struct swap_info_struct *si,
 	return true;
 }
 
-/*
- * Currently, the swap table is not used for count tracking, just
- * do a sanity check here to ensure nothing leaked, so the swap
- * table should be empty upon freeing.
- */
-static void swap_cluster_assert_table_empty(struct swap_cluster_info *ci,
-					    unsigned int start, unsigned int nr)
-{
-	unsigned int ci_off = start % SWAPFILE_CLUSTER;
-	unsigned int ci_end = ci_off + nr;
-	unsigned long swp_tb;
-
-	if (IS_ENABLED(CONFIG_DEBUG_VM)) {
-		do {
-			swp_tb = __swap_table_get(ci, ci_off);
-			VM_WARN_ON_ONCE(!swp_tb_is_null(swp_tb));
-		} while (++ci_off < ci_end);
-	}
-}
-
 static bool cluster_alloc_range(struct swap_info_struct *si,
 				struct swap_cluster_info *ci,
 				struct folio *folio,
@@ -943,13 +925,14 @@ static bool cluster_alloc_range(struct swap_info_struct *si,
 	if (likely(folio)) {
 		order = folio_order(folio);
 		nr_pages = 1 << order;
+		swap_cluster_assert_empty(ci, offset % SWAPFILE_CLUSTER, nr_pages, false);
 		__swap_cache_add_folio(ci, folio, swp_entry(si->type, offset));
 	} else if (IS_ENABLED(CONFIG_HIBERNATION)) {
 		order = 0;
 		nr_pages = 1;
 		WARN_ON_ONCE(si->swap_map[offset]);
 		si->swap_map[offset] = 1;
-		swap_cluster_assert_table_empty(ci, offset, 1);
+		swap_cluster_assert_empty(ci, offset % SWAPFILE_CLUSTER, 1, false);
 	} else {
 		/* Allocation without folio is only possible with hibernation */
 		WARN_ON_ONCE(1);
@@ -1768,7 +1751,7 @@ void swap_entries_free(struct swap_info_struct *si,
 
 	mem_cgroup_uncharge_swap(entry, nr_pages);
 	swap_range_free(si, offset, nr_pages);
-	swap_cluster_assert_table_empty(ci, offset, nr_pages);
+	swap_cluster_assert_empty(ci, offset % SWAPFILE_CLUSTER, nr_pages, false);
 	if (!ci->count)
 		free_cluster(si, ci);
 
@@ -2769,7 +2752,7 @@ static void free_swap_cluster_info(struct swap_cluster_info *cluster_info,
 		/* Cluster with bad marks count will have a remaining table */
 		spin_lock(&ci->lock);
 		if (rcu_dereference_protected(ci->table, true)) {
-			swap_cluster_assert_empty(ci, true);
+			swap_cluster_assert_empty(ci, 0, SWAPFILE_CLUSTER, true);
 			swap_cluster_free_table(ci);
 		}
 		spin_unlock(&ci->lock);
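
Not part of the patch, just a note for review: below is a sketch of the
shape the merged helper ends up with, pieced together from the hunks
above. The loop body is an assumption modeled on the removed
swap_cluster_assert_table_empty(), and swp_tb_is_bad() is a hypothetical
placeholder for however the real code recognizes a bad slot, so the
actual function in mm/swapfile.c may differ in detail:

/*
 * Sketch only -- reconstructed from this patch's hunks, not copied
 * from the tree. swp_tb_is_bad() is a hypothetical placeholder.
 */
static void swap_cluster_assert_empty(struct swap_cluster_info *ci,
				      unsigned int ci_off, unsigned int nr,
				      bool swapoff)
{
	unsigned int ci_end = ci_off + nr;
	unsigned long swp_tb;
	int bad_slots = 0;

	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;

	do {
		swp_tb = __swap_table_get(ci, ci_off);
		if (swp_tb_is_bad(swp_tb))
			bad_slots++;
		else
			VM_WARN_ON_ONCE(!swp_tb_is_null(swp_tb));
	} while (++ci_off < ci_end);

	/*
	 * Bad slots can't be freed, so only the swapoff path may find
	 * any; whatever accounting check swapoff does against bad_slots
	 * is elided here.
	 */
	VM_WARN_ON_ONCE(!swapoff && bad_slots);
}

Callers now pass a cluster-relative range: offset % SWAPFILE_CLUSTER
plus a slot count on the allocation and free paths, or
(0, SWAPFILE_CLUSTER) when asserting a whole cluster.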
-- 
2.52.0