The setup_clusters() function could leak the 'cluster_info' memory if an
error occurred on a path that did not jump to the 'err_free' label.
Simplify the error handling by removing the 'err_free' label and calling
free_cluster_info() on all error exit paths instead. This is safe because
free_cluster_info() already handles a NULL pointer.

Fixes: 07adc4cf1ecd ("mm, swap: implement dynamic allocation of swap table")
Signed-off-by: Youngjun Park
Reviewed-by: Kairui Song

diff --git a/mm/swapfile.c b/mm/swapfile.c
index c35bb8593f50..ed2c8e4edb71 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -3324,7 +3324,7 @@ static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si,
 	si->global_cluster = kmalloc(sizeof(*si->global_cluster),
				     GFP_KERNEL);
 	if (!si->global_cluster)
-		goto err_free;
+		goto err;
 	for (i = 0; i < SWAP_NR_ORDERS; i++)
 		si->global_cluster->next[i] = SWAP_ENTRY_INVALID;
 	spin_lock_init(&si->global_cluster_lock);
@@ -3377,9 +3377,8 @@ static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si,
 	}

 	return cluster_info;

-err_free:
-	free_cluster_info(cluster_info, maxpages);
 err:
+	free_cluster_info(cluster_info, maxpages);
 	return ERR_PTR(err);
 }
--
2.34.1

The current non-rotational check is unreliable because the device's
rotational status can be changed by the user via sysfs at any time. Use
the SWP_SOLIDSTATE flag instead, which is fixed at swapon time:
nr_rotate_swap is only incremented at swapon when SWP_SOLIDSTATE is not
set, so checking the same flag at swapoff keeps the counter balanced even
if the rotational attribute changes in between. It is also simpler and
easier to read.

Fixes: 81a0298bdfab ("mm, swap: don't use VMA based swap readahead if HDD is used as swap")
Signed-off-by: Youngjun Park

diff --git a/mm/swapfile.c b/mm/swapfile.c
index ed2c8e4edb71..b7cde8ef6742 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2913,7 +2913,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
 	if (p->flags & SWP_CONTINUED)
 		free_swap_count_continuations(p);

-	if (!p->bdev || !bdev_nonrot(p->bdev))
+	if (!(p->flags & SWP_SOLIDSTATE))
 		atomic_dec(&nr_rotate_swap);

 	mutex_lock(&swapon_mutex);
--
2.34.1

The function now manages get/put_swap_device() internally, so the comment
explaining this behavior to callers is unnecessary. Remove it.

Signed-off-by: Youngjun Park

diff --git a/mm/swap_state.c b/mm/swap_state.c
index b13e9c4baa90..d20d238109f9 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -509,10 +509,6 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
  * and reading the disk if it is not already cached.
  * A failure return means that either the page allocation failed or that
  * the swap entry is no longer in use.
- *
- * get/put_swap_device() aren't needed to call this function, because
- * __read_swap_cache_async() call them and swap_read_folio() holds the
- * swap cache folio lock.
  */
 struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		struct vm_area_struct *vma, unsigned long addr,
--
2.34.1

swap_alloc_slow() does not need to return a bool: every caller checks the
allocation result through the entry parameter rather than the return
value. Change the return type to void and adjust the return statements
accordingly.
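For illustration, a minimal caller-side sketch of what "through the entry
parameter" means (simplified and hypothetical; the actual call site, e.g.
in folio_alloc_swap(), may look different):

	swp_entry_t entry = {};

	/* Try the fast path first, then fall back to the slow path. */
	if (!swap_alloc_fast(&entry, order))
		swap_alloc_slow(&entry, order);

	/* Success is observed only through 'entry', never via a return value. */
	if (!entry.val)
		return -ENOMEM;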
Signed-off-by: Youngjun Park
Reviewed-by: Kairui Song

diff --git a/mm/swapfile.c b/mm/swapfile.c
index b7cde8ef6742..4f8a1843262e 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1339,7 +1339,7 @@ static bool swap_alloc_fast(swp_entry_t *entry,
 }

 /* Rotate the device and switch to a new cluster */
-static bool swap_alloc_slow(swp_entry_t *entry,
+static void swap_alloc_slow(swp_entry_t *entry,
			    int order)
 {
 	unsigned long offset;
@@ -1356,10 +1356,10 @@ static void swap_alloc_slow(swp_entry_t *entry,
 		put_swap_device(si);
 		if (offset) {
 			*entry = swp_entry(si->type, offset);
-			return true;
+			return;
 		}
 		if (order)
-			return false;
+			return;
 	}

 	spin_lock(&swap_avail_lock);
@@ -1378,7 +1378,6 @@ static void swap_alloc_slow(swp_entry_t *entry,
 			goto start_over;
 	}
 	spin_unlock(&swap_avail_lock);
-	return false;
 }
--
2.34.1

The scan_swap_map_slots() helper has been removed, but several comments in
the swap allocation and reclaim paths still refer to it. Clean up those
outdated references and reflow the affected comment blocks to match kernel
coding style.

Signed-off-by: Youngjun Park

diff --git a/mm/swapfile.c b/mm/swapfile.c
index 4f8a1843262e..543f303f101d 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -236,11 +236,10 @@ static int __try_to_reclaim_swap(struct swap_info_struct *si,
 	ret = -nr_pages;

 	/*
-	 * When this function is called from scan_swap_map_slots() and it's
-	 * called by vmscan.c at reclaiming folios. So we hold a folio lock
-	 * here. We have to use trylock for avoiding deadlock. This is a special
-	 * case and you should use folio_free_swap() with explicit folio_lock()
-	 * in usual operations.
+	 * We hold a folio lock here. We have to use trylock for
+	 * avoiding deadlock. This is a special case and you should
+	 * use folio_free_swap() with explicit folio_lock() in usual
+	 * operations.
 	 */
 	if (!folio_trylock(folio))
 		goto out;
@@ -1365,14 +1364,13 @@ static void swap_alloc_slow(swp_entry_t *entry,
 	spin_lock(&swap_avail_lock);
 	/*
 	 * if we got here, it's likely that si was almost full before,
-	 * and since scan_swap_map_slots() can drop the si->lock,
 	 * multiple callers probably all tried to get a page from the
 	 * same si and it filled up before we could get one; or, the si
-	 * filled up between us dropping swap_avail_lock and taking
-	 * si->lock. Since we dropped the swap_avail_lock, the
-	 * swap_avail_head list may have been modified; so if next is
-	 * still in the swap_avail_head list then try it, otherwise
-	 * start over if we have not gotten any slots.
+	 * filled up between us dropping swap_avail_lock.
+	 * Since we dropped the swap_avail_lock, the swap_avail_list
+	 * may have been modified; so if next is still in the
+	 * swap_avail_head list then try it, otherwise start over if we
+	 * have not gotten any slots.
 	 */
 	if (plist_node_empty(&si->avail_list))
 		goto start_over;
--
2.34.1
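For reference, the trylock-and-bail rule that the reworded comment in
__try_to_reclaim_swap() keeps, as a simplified sketch (illustration only,
not the actual function body):

	/*
	 * Per the comment: use trylock to avoid deadlock, and simply
	 * give up if the folio is contended. Normal paths should use
	 * folio_free_swap() under an explicit folio_lock() instead.
	 */
	if (!folio_trylock(folio))
		goto out;

	/* ... reclaim the swap cache folio ... */

	folio_unlock(folio);
out:
	return ret;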