Some error paths neglect to free the allocated 'cluster_info', causing
a memory leak. Change the error jumps to the 'err_free' label to ensure
proper cleanup.

Fixes: 07adc4cf1ecd ("mm, swap: implement dynamic allocation of swap table")
Signed-off-by: Youngjun Park
---
 mm/swapfile.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/mm/swapfile.c b/mm/swapfile.c
index c35bb8593f50..6dc0e7a738bc 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -3339,7 +3339,7 @@ static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si,
          */
         err = swap_cluster_setup_bad_slot(cluster_info, 0);
         if (err)
-                goto err;
+                goto err_free;
 
         for (i = 0; i < swap_header->info.nr_badpages; i++) {
                 unsigned int page_nr = swap_header->info.badpages[i];
@@ -3347,12 +3347,12 @@ static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si,
                         continue;
                 err = swap_cluster_setup_bad_slot(cluster_info, page_nr);
                 if (err)
-                        goto err;
+                        goto err_free;
         }
         for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++) {
                 err = swap_cluster_setup_bad_slot(cluster_info, i);
                 if (err)
-                        goto err;
+                        goto err_free;
         }
 
         INIT_LIST_HEAD(&si->free_clusters);
--
2.34.1
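(For illustration only: a minimal, stand-alone sketch in plain C of the
centralized-cleanup pattern the fix above relies on, i.e. every failure
after the allocation jumps to a single label that frees it. All names
here are made up; this is not the mm/swapfile.c code.)

#include <stdlib.h>

/* made-up stand-in for the real per-slot setup step */
static int setup_step(int *table, unsigned long slot)
{
        table[slot] = -1;
        return 0;
}

static int *setup_table(unsigned long nr_slots)
{
        int *table = calloc(nr_slots, sizeof(*table));
        unsigned long i;
        int err;

        if (!table)
                return NULL;

        for (i = 0; i < nr_slots; i++) {
                err = setup_step(table, i);
                if (err)
                        goto err_free;  /* every failure path funnels here */
        }
        return table;

err_free:
        free(table);    /* single place that undoes the allocation */
        return NULL;
}

int main(void)
{
        int *t = setup_table(8);

        free(t);
        return 0;
}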
The current non-rotational check is unreliable, as the device's
rotational status can be changed by the user via sysfs. Use the more
reliable SWP_SOLIDSTATE flag, which is set once at swapon time, so that
the nr_rotate_swap count stays consistent. It is also simpler and
easier to read.

Signed-off-by: Youngjun Park
---
 mm/swapfile.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mm/swapfile.c b/mm/swapfile.c
index 6dc0e7a738bc..b5d42918c01b 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2913,7 +2913,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
         if (p->flags & SWP_CONTINUED)
                 free_swap_count_continuations(p);
 
-        if (!p->bdev || !bdev_nonrot(p->bdev))
+        if (!(p->flags & SWP_SOLIDSTATE))
                 atomic_dec(&nr_rotate_swap);
 
         mutex_lock(&swapon_mutex);
--
2.34.1

The function now manages get/put_swap_device() internally, so the
comment explaining this behavior to callers is no longer needed.

Signed-off-by: Youngjun Park
---
 mm/swap_state.c | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/mm/swap_state.c b/mm/swap_state.c
index b13e9c4baa90..d20d238109f9 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -509,10 +509,6 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
  * and reading the disk if it is not already cached.
  * A failure return means that either the page allocation failed or that
  * the swap entry is no longer in use.
- *
- * get/put_swap_device() aren't needed to call this function, because
- * __read_swap_cache_async() call them and swap_read_folio() holds the
- * swap cache folio lock.
  */
 struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                 struct vm_area_struct *vma, unsigned long addr,
--
2.34.1

swap_alloc_slow() does not need to return a bool, as all callers check
the allocation result through the entry parameter. Update the function
signature and remove the return statements accordingly.

Signed-off-by: Youngjun Park
---
 mm/swapfile.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/mm/swapfile.c b/mm/swapfile.c
index b5d42918c01b..89eb57eee7f7 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1339,7 +1339,7 @@ static bool swap_alloc_fast(swp_entry_t *entry,
 }
 
 /* Rotate the device and switch to a new cluster */
-static bool swap_alloc_slow(swp_entry_t *entry,
+static void swap_alloc_slow(swp_entry_t *entry,
                             int order)
 {
         unsigned long offset;
@@ -1356,10 +1356,10 @@ static void swap_alloc_slow(swp_entry_t *entry,
                 put_swap_device(si);
                 if (offset) {
                         *entry = swp_entry(si->type, offset);
-                        return true;
+                        return;
                 }
                 if (order)
-                        return false;
+                        return;
         }
 
         spin_lock(&swap_avail_lock);
@@ -1378,7 +1378,6 @@ static void swap_alloc_slow(swp_entry_t *entry,
                 goto start_over;
         }
         spin_unlock(&swap_avail_lock);
-        return false;
 }
 
 /*
--
2.34.1

The scan_swap_map_slots() helper has been removed, but several comments
in the swap allocation and reclaim paths still referred to it. Clean up
those outdated references and reflow the affected comment blocks to
match kernel coding style.

Signed-off-by: Youngjun Park
---
 mm/swapfile.c | 20 +++++++++-----------
 1 file changed, 9 insertions(+), 11 deletions(-)

diff --git a/mm/swapfile.c b/mm/swapfile.c
index 89eb57eee7f7..1dace4356bd1 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -236,11 +236,10 @@ static int __try_to_reclaim_swap(struct swap_info_struct *si,
         ret = -nr_pages;
 
         /*
-         * When this function is called from scan_swap_map_slots() and it's
-         * called by vmscan.c at reclaiming folios. So we hold a folio lock
-         * here. We have to use trylock for avoiding deadlock. This is a special
-         * case and you should use folio_free_swap() with explicit folio_lock()
-         * in usual operations.
+         * We hold a folio lock here. We have to use trylock for
+         * avoiding deadlock. This is a special case and you should
+         * use folio_free_swap() with explicit folio_lock() in usual
+         * operations.
          */
         if (!folio_trylock(folio))
                 goto out;
@@ -1365,14 +1364,13 @@ static void swap_alloc_slow(swp_entry_t *entry,
         spin_lock(&swap_avail_lock);
         /*
          * if we got here, it's likely that si was almost full before,
-         * and since scan_swap_map_slots() can drop the si->lock,
          * multiple callers probably all tried to get a page from the
          * same si and it filled up before we could get one; or, the si
-         * filled up between us dropping swap_avail_lock and taking
-         * si->lock. Since we dropped the swap_avail_lock, the
-         * swap_avail_head list may have been modified; so if next is
-         * still in the swap_avail_head list then try it, otherwise
-         * start over if we have not gotten any slots.
+         * filled up after we dropped the swap_avail_lock. Since we
+         * dropped the swap_avail_lock, the swap_avail_head list may
+         * have been modified; so if next is still in the
+         * swap_avail_head list then try it, otherwise start over if we
+         * have not gotten any slots.
          */
         if (plist_node_empty(&si->avail_list))
                 goto start_over;
--
2.34.1
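(Similarly, for illustration only: a minimal, stand-alone sketch in
plain C of the calling convention adopted for swap_alloc_slow() above,
where the allocator reports success solely through its out-parameter,
so the return type can be void and callers simply test the entry
afterwards. Names are hypothetical; this is not the kernel code.)

#include <stdio.h>

struct entry { unsigned long val; };

/* hypothetical slow-path allocator: success is visible only via *out */
static void alloc_slow(struct entry *out, int order)
{
        if (order == 0)         /* pretend only order-0 requests succeed */
                out->val = 42;  /* made-up "allocated" value */
        /* on failure, *out is simply left untouched */
}

int main(void)
{
        struct entry e = { 0 };

        alloc_slow(&e, 0);
        if (e.val)              /* callers test the out-parameter... */
                printf("got entry %lu\n", e.val);
        else                    /* ...not a boolean return value */
                printf("allocation failed\n");
        return 0;
}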