The new alloc_swap_scan_list() scans either the whole list or only the
first cluster of the list. This reduces the repeated pattern of
isolating a cluster and then scanning that cluster. As a result,
cluster_alloc_swap_entry() is shorter and shallower. No functional
change.

Signed-off-by: Chris Li
---
This patch goes on top of Kairui's swap improve cluster scan series:

https://lore.kernel.org/linux-mm/20250806161748.76651-1-ryncsn@gmail.com/

A stand-alone sketch of the scan_all pattern is appended at the end of
this mail for reference.
---
 mm/swapfile.c | 86 ++++++++++++++++++++++++++++++++---------------------------
 1 file changed, 47 insertions(+), 39 deletions(-)

diff --git a/mm/swapfile.c b/mm/swapfile.c
index 4a0cf4fb348d..fcb1e57d8108 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -820,6 +820,29 @@ static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si,
 	return found;
 }
 
+static unsigned int alloc_swap_scan_list(struct swap_info_struct *si,
+					 struct list_head *list,
+					 unsigned int order,
+					 unsigned char usage,
+					 bool scan_all)
+{
+	unsigned int found = SWAP_ENTRY_INVALID;
+
+	do {
+		struct swap_cluster_info *ci = isolate_lock_cluster(si, list);
+		unsigned long offset;
+
+		if (!ci)
+			break;
+		offset = cluster_offset(si, ci);
+		found = alloc_swap_scan_cluster(si, ci, offset, order, usage);
+		if (found)
+			return found;
+	} while (scan_all);
+
+	return found;
+}
+
 static void swap_reclaim_full_clusters(struct swap_info_struct *si, bool force)
 {
 	long to_scan = 1;
@@ -913,32 +936,24 @@ static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int o
 	 * to spread out the writes.
 	 */
 	if (si->flags & SWP_PAGE_DISCARD) {
-		ci = isolate_lock_cluster(si, &si->free_clusters);
-		if (ci) {
-			found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
-							order, usage);
-			if (found)
-				goto done;
-		}
+		found = alloc_swap_scan_list(si, &si->free_clusters, order, usage,
+					     false);
+		if (found)
+			goto done;
 	}
 
 	if (order < PMD_ORDER) {
-		while ((ci = isolate_lock_cluster(si, &si->nonfull_clusters[order]))) {
-			found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
-							order, usage);
-			if (found)
-				goto done;
-		}
+		found = alloc_swap_scan_list(si, &si->nonfull_clusters[order],
+					     order, usage, true);
+		if (found)
+			goto done;
 	}
 
 	if (!(si->flags & SWP_PAGE_DISCARD)) {
-		ci = isolate_lock_cluster(si, &si->free_clusters);
-		if (ci) {
-			found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
-							order, usage);
-			if (found)
-				goto done;
-		}
+		found = alloc_swap_scan_list(si, &si->free_clusters, order, usage,
+					     false);
+		if (found)
+			goto done;
 	}
 
 	/* Try reclaim full clusters if free and nonfull lists are drained */
@@ -952,13 +967,10 @@ static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int o
 		 * failure is not critical. Scanning one cluster still
 		 * keeps the list rotated and reclaimed (for HAS_CACHE).
		 */
-		ci = isolate_lock_cluster(si, &si->frag_clusters[order]);
-		if (ci) {
-			found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
-							order, usage);
-			if (found)
-				goto done;
-		}
+		found = alloc_swap_scan_list(si, &si->frag_clusters[order], order,
+					     usage, false);
+		if (found)
+			goto done;
 	}
 
 	/*
@@ -977,19 +989,15 @@ static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int o
 		 * Clusters here have at least one usable slots and can't fail order 0
 		 * allocation, but reclaim may drop si->lock and race with another user.
		 */
-		while ((ci = isolate_lock_cluster(si, &si->frag_clusters[o]))) {
-			found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
-							0, usage);
-			if (found)
-				goto done;
-		}
+		found = alloc_swap_scan_list(si, &si->frag_clusters[o],
+					     0, usage, true);
+		if (found)
+			goto done;
 
-		while ((ci = isolate_lock_cluster(si, &si->nonfull_clusters[o]))) {
-			found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
-							0, usage);
-			if (found)
-				goto done;
-		}
+		found = alloc_swap_scan_list(si, &si->nonfull_clusters[o],
+					     0, usage, true);
+		if (found)
+			goto done;
 	}
 done:
 	if (!(si->flags & SWP_SOLIDSTATE))

---
base-commit: f89484324d5876ee10765fa61da0332899fa1a6a
change-id: 20250806-swap-scan-list-2b89e3424b0a

Best regards,
--
Chris Li
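P.S. Below is a stand-alone, user-space sketch of the scan_all pattern,
in case it helps review. It is only an illustration, not kernel code:
toy_cluster, toy_pop() and toy_scan() are made-up stand-ins for
swap_cluster_info, isolate_lock_cluster() and alloc_swap_scan_cluster(),
and it ignores si, order, usage and all locking.

/* Build and run: cc -std=c99 -Wall scan_all_sketch.c && ./a.out */
#include <stdbool.h>
#include <stdio.h>

struct toy_cluster {
	int id;
	bool has_free_slot;
	struct toy_cluster *next;
};

/* Stand-in for isolate_lock_cluster(): detach the first cluster, if any. */
static struct toy_cluster *toy_pop(struct toy_cluster **list)
{
	struct toy_cluster *ci = *list;

	if (ci)
		*list = ci->next;
	return ci;
}

/* Stand-in for alloc_swap_scan_cluster(): 0 means "nothing found". */
static int toy_scan(struct toy_cluster *ci)
{
	return ci->has_free_slot ? ci->id : 0;
}

/*
 * The factored-out pattern: either try only the first cluster
 * (scan_all == false), or keep popping clusters until one yields a
 * slot or the list runs empty (scan_all == true).
 */
static int toy_scan_list(struct toy_cluster **list, bool scan_all)
{
	int found = 0;

	do {
		struct toy_cluster *ci = toy_pop(list);

		if (!ci)
			break;
		found = toy_scan(ci);
		if (found)
			return found;
	} while (scan_all);

	return found;
}

int main(void)
{
	struct toy_cluster c3 = { .id = 3, .has_free_slot = true,  .next = NULL };
	struct toy_cluster c2 = { .id = 2, .has_free_slot = false, .next = &c3 };
	struct toy_cluster c1 = { .id = 1, .has_free_slot = false, .next = &c2 };
	struct toy_cluster *list = &c1;

	/* First-cluster-only probe: c1 has no slot, so nothing is found. */
	printf("scan_all=false -> %d\n", toy_scan_list(&list, false));

	/* Whole-list scan: keeps going and finds a slot in c3. */
	printf("scan_all=true  -> %d\n", toy_scan_list(&list, true));
	return 0;
}

The point of the do/while (scan_all) shape is that the same helper
covers both the single-probe callers and the drain-the-whole-list
callers without duplicating the isolate-then-scan boilerplate.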