From: Kairui Song

Since commit 0ff67f990bd4 ("mm, swap: remove swap slot cache"),
hibernation has been using the slow swap slot allocation path for
simplicity. This turns out to cause a regression on some devices,
because the allocator now rotates clusters too often, leading to slower
allocation and a more random distribution of data.

The fast allocation path is not complex, so implement it for
hibernation as well.

Tests with a Samsung SSD 830 Series (SATA II, 3.0 Gbps) show hibernation
is several times faster [1]:

6.19:               324 seconds
After this series:   35 seconds

Fixes: 0ff67f990bd4 ("mm, swap: remove swap slot cache")
Reported-by: Carsten Grohmann
Closes: https://lore.kernel.org/linux-mm/20260206121151.dea3633d1f0ded7bbf49c22e@linux-foundation.org/
Link: https://lore.kernel.org/linux-mm/8b4bdcfa-ce3f-4e23-839f-31367df7c18f@gmx.de/ [1]
Cc: stable@vger.kernel.org
Signed-off-by: Kairui Song
---
 mm/swapfile.c | 21 ++++++++++++++++-----
 1 file changed, 16 insertions(+), 5 deletions(-)

diff --git a/mm/swapfile.c b/mm/swapfile.c
index c6863ff7152c..32e0e7545ab8 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1926,8 +1926,9 @@ void swap_put_entries_direct(swp_entry_t entry, int nr)
 /* Allocate a slot for hibernation */
 swp_entry_t swap_alloc_hibernation_slot(int type)
 {
-	struct swap_info_struct *si = swap_type_to_info(type);
-	unsigned long offset;
+	struct swap_info_struct *pcp_si, *si = swap_type_to_info(type);
+	unsigned long pcp_offset, offset = SWAP_ENTRY_INVALID;
+	struct swap_cluster_info *ci;
 	swp_entry_t entry = {0};
 
 	if (!si)
@@ -1937,11 +1938,21 @@ swp_entry_t swap_alloc_hibernation_slot(int type)
 	if (get_swap_device_info(si)) {
 		if (si->flags & SWP_WRITEOK) {
 			/*
-			 * Grab the local lock to be compliant
-			 * with swap table allocation.
+			 * Try the local cluster first if it matches the device. If
+			 * not, try grab a new cluster and override local cluster.
 			 */
 			local_lock(&percpu_swap_cluster.lock);
-			offset = cluster_alloc_swap_entry(si, NULL);
+			pcp_si = this_cpu_read(percpu_swap_cluster.si[0]);
+			pcp_offset = this_cpu_read(percpu_swap_cluster.offset[0]);
+			if (pcp_si == si && pcp_offset) {
+				ci = swap_cluster_lock(si, pcp_offset);
+				if (cluster_is_usable(ci, 0))
+					offset = alloc_swap_scan_cluster(si, ci, NULL, pcp_offset);
+				else
+					swap_cluster_unlock(ci);
+			}
+			if (!offset)
+				offset = cluster_alloc_swap_entry(si, NULL);
 			local_unlock(&percpu_swap_cluster.lock);
 			if (offset)
 				entry = swp_entry(si->type, offset);
--
2.52.0
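For readers not familiar with the allocator, here is a minimal userspace
C model of the fast-path idea above. It is only an illustrative sketch,
not kernel code, and all names (cached_cluster, slow_alloc, and so on)
are made up: remember the last cluster we allocated from and try it
first, falling back to the rotating slow scan only when it is full, so
consecutive allocations stay sequential on disk.

	#include <stdio.h>
	#include <stdbool.h>

	#define NCLUSTERS  4
	#define CLUSTER_SZ 8	/* slots per cluster */

	static bool used[NCLUSTERS][CLUSTER_SZ];
	static int cached_cluster = -1;	/* models the per-CPU cluster hint */

	/* Scan one cluster for a free slot; return global slot index or -1. */
	static int scan_cluster(int c)
	{
		for (int s = 0; s < CLUSTER_SZ; s++) {
			if (!used[c][s]) {
				used[c][s] = true;
				return c * CLUSTER_SZ + s;
			}
		}
		return -1;
	}

	/* Slow path: rotate through all clusters. */
	static int slow_alloc(void)
	{
		for (int c = 0; c < NCLUSTERS; c++) {
			int slot = scan_cluster(c);
			if (slot >= 0) {
				cached_cluster = c;	/* remember for the fast path */
				return slot;
			}
		}
		return -1;
	}

	/* Fast path first, as the patch now does for hibernation. */
	static int alloc_slot(void)
	{
		int slot = -1;

		if (cached_cluster >= 0)
			slot = scan_cluster(cached_cluster);
		if (slot < 0)
			slot = slow_alloc();
		return slot;
	}

	int main(void)
	{
		/* Consecutive allocations land sequentially in one cluster. */
		for (int i = 0; i < 12; i++)
			printf("slot %d\n", alloc_slot());
		return 0;
	}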
From: Kairui Song

swap_alloc_hibernation_slot() doesn't have to check the SWP_WRITEOK
flag itself, as the allocator also checks the flag and refuses to
allocate if the device is not writable. This might waste a trivial
amount of CPU cycles when a hibernation allocation races with swapoff,
but that is very unlikely to happen, and removing the check from the
common path should be more helpful.

Signed-off-by: Kairui Song
---
 mm/swapfile.c | 38 ++++++++++++++++++--------------------
 1 file changed, 18 insertions(+), 20 deletions(-)

diff --git a/mm/swapfile.c b/mm/swapfile.c
index 32e0e7545ab8..0d1b17c99221 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1936,27 +1936,25 @@ swp_entry_t swap_alloc_hibernation_slot(int type)
 
 	/* This is called for allocating swap entry, not cache */
 	if (get_swap_device_info(si)) {
-		if (si->flags & SWP_WRITEOK) {
-			/*
-			 * Try the local cluster first if it matches the device. If
-			 * not, try grab a new cluster and override local cluster.
-			 */
-			local_lock(&percpu_swap_cluster.lock);
-			pcp_si = this_cpu_read(percpu_swap_cluster.si[0]);
-			pcp_offset = this_cpu_read(percpu_swap_cluster.offset[0]);
-			if (pcp_si == si && pcp_offset) {
-				ci = swap_cluster_lock(si, pcp_offset);
-				if (cluster_is_usable(ci, 0))
-					offset = alloc_swap_scan_cluster(si, ci, NULL, pcp_offset);
-				else
-					swap_cluster_unlock(ci);
-			}
-			if (!offset)
-				offset = cluster_alloc_swap_entry(si, NULL);
-			local_unlock(&percpu_swap_cluster.lock);
-			if (offset)
-				entry = swp_entry(si->type, offset);
+		/*
+		 * Try the local cluster first if it matches the device. If
+		 * not, try grab a new cluster and override local cluster.
+		 */
+		local_lock(&percpu_swap_cluster.lock);
+		pcp_si = this_cpu_read(percpu_swap_cluster.si[0]);
+		pcp_offset = this_cpu_read(percpu_swap_cluster.offset[0]);
+		if (pcp_si == si && pcp_offset) {
+			ci = swap_cluster_lock(si, pcp_offset);
+			if (cluster_is_usable(ci, 0))
+				offset = alloc_swap_scan_cluster(si, ci, NULL, pcp_offset);
+			else
+				swap_cluster_unlock(ci);
 		}
+		if (!offset)
+			offset = cluster_alloc_swap_entry(si, NULL);
+		local_unlock(&percpu_swap_cluster.lock);
+		if (offset)
+			entry = swp_entry(si->type, offset);
 		put_swap_device(si);
 	}
 fail:
--
2.52.0
From: Kairui Song

Almost all callers of the cluster scan helper follow the same routine:
lock -> check usability/emptiness -> allocate -> unlock. So merge these
steps into the helper itself to simplify the code.

Signed-off-by: Kairui Song
---
 mm/swapfile.c | 30 ++++++++----------------------
 1 file changed, 8 insertions(+), 22 deletions(-)

diff --git a/mm/swapfile.c b/mm/swapfile.c
index 0d1b17c99221..68dbbbd0dd24 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -923,11 +923,14 @@ static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si,
 	bool need_reclaim, ret, usable;
 
 	lockdep_assert_held(&ci->lock);
-	VM_WARN_ON(!cluster_is_usable(ci, order));
-	if (end < nr_pages || ci->count + nr_pages > SWAPFILE_CLUSTER)
+	if (!cluster_is_usable(ci, order) || end < nr_pages ||
+	    ci->count + nr_pages > SWAPFILE_CLUSTER)
 		goto out;
 
+	if (cluster_is_empty(ci))
+		offset = cluster_offset(si, ci);
+
 	for (end -= nr_pages; offset <= end; offset += nr_pages) {
 		need_reclaim = false;
 		if (!cluster_scan_range(si, ci, offset, nr_pages, &need_reclaim))
@@ -1060,14 +1063,7 @@ static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si,
 			goto new_cluster;
 
 		ci = swap_cluster_lock(si, offset);
-		/* Cluster could have been used by another order */
-		if (cluster_is_usable(ci, order)) {
-			if (cluster_is_empty(ci))
-				offset = cluster_offset(si, ci);
-			found = alloc_swap_scan_cluster(si, ci, folio, offset);
-		} else {
-			swap_cluster_unlock(ci);
-		}
+		found = alloc_swap_scan_cluster(si, ci, folio, offset);
 		if (found)
 			goto done;
 	}
@@ -1332,14 +1328,7 @@ static bool swap_alloc_fast(struct folio *folio)
 		return false;
 
 	ci = swap_cluster_lock(si, offset);
-	if (cluster_is_usable(ci, order)) {
-		if (cluster_is_empty(ci))
-			offset = cluster_offset(si, ci);
-		alloc_swap_scan_cluster(si, ci, folio, offset);
-	} else {
-		swap_cluster_unlock(ci);
-	}
-
+	alloc_swap_scan_cluster(si, ci, folio, offset);
 	put_swap_device(si);
 	return folio_test_swapcache(folio);
 }
@@ -1945,10 +1934,7 @@ swp_entry_t swap_alloc_hibernation_slot(int type)
 	pcp_offset = this_cpu_read(percpu_swap_cluster.offset[0]);
 	if (pcp_si == si && pcp_offset) {
 		ci = swap_cluster_lock(si, pcp_offset);
-		if (cluster_is_usable(ci, 0))
-			offset = alloc_swap_scan_cluster(si, ci, NULL, pcp_offset);
-		else
-			swap_cluster_unlock(ci);
+		offset = alloc_swap_scan_cluster(si, ci, NULL, pcp_offset);
 	}
 	if (!offset)
 		offset = cluster_alloc_swap_entry(si, NULL);
--
2.52.0
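To make the refactor pattern in this last patch concrete, a small
standalone C sketch (hypothetical names, not the kernel API): the
caller still takes the lock, but the helper now owns the usability
check, the allocation, and the unlock, so every call site collapses to
a single call plus a failure check.

	#include <pthread.h>
	#include <stdio.h>

	struct cluster {
		pthread_mutex_t lock;
		int next, capacity;
	};

	static int cluster_usable(struct cluster *c)
	{
		return c->next < c->capacity;
	}

	/*
	 * After the merge: the helper checks usability, allocates, and
	 * always releases the lock taken by the caller. Returns -1 when
	 * the caller should fall back to another cluster.
	 */
	static int scan_cluster(struct cluster *c)
	{
		int slot = -1;

		if (cluster_usable(c))
			slot = c->next++;
		pthread_mutex_unlock(&c->lock);
		return slot;
	}

	static int try_alloc(struct cluster *c)
	{
		pthread_mutex_lock(&c->lock);
		/* was: lock, check usability, scan on success, unlock on failure */
		return scan_cluster(c);
	}

	int main(void)
	{
		struct cluster c = { PTHREAD_MUTEX_INITIALIZER, 0, 2 };

		for (int i = 0; i < 3; i++)
			printf("slot %d\n", try_alloc(&c));	/* 0, 1, then -1 */
		return 0;
	}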