Zero swap entries are now handled by a separate, decoupled backend in the
virtual swap layer. The zeromap bitmap of the physical swapfile is no
longer used, so remove it. There is no behavioral change, and this saves
1 bit per swap page of memory overhead.

Signed-off-by: Nhat Pham
---
 include/linux/swap.h |  1 -
 mm/swapfile.c        | 30 +++++-------------------------
 2 files changed, 5 insertions(+), 26 deletions(-)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index 54df972608047..9cd45eab313f8 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -260,7 +260,6 @@ struct swap_info_struct {
 	signed char	type;		/* strange name for an index */
 	unsigned int	max;		/* extent of the swap_map */
 	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
-	unsigned long *zeromap;		/* kvmalloc'ed bitmap to track zero pages */
 	struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
 	struct list_head free_clusters; /* free clusters list */
 	struct list_head full_clusters; /* full clusters list */
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 1aa29dd220f9a..e1cb01b821ff3 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2317,8 +2317,7 @@ static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
 
 static void setup_swap_info(struct swap_info_struct *si, int prio,
 			    unsigned char *swap_map,
-			    struct swap_cluster_info *cluster_info,
-			    unsigned long *zeromap)
+			    struct swap_cluster_info *cluster_info)
 {
 	si->prio = prio;
 	/*
@@ -2329,7 +2328,6 @@ static void setup_swap_info(struct swap_info_struct *si, int prio,
 	si->avail_list.prio = -si->prio;
 	si->swap_map = swap_map;
 	si->cluster_info = cluster_info;
-	si->zeromap = zeromap;
 }
 
 static void _enable_swap_info(struct swap_info_struct *si)
@@ -2347,12 +2345,11 @@ static void _enable_swap_info(struct swap_info_struct *si)
 
 static void enable_swap_info(struct swap_info_struct *si, int prio,
 			     unsigned char *swap_map,
-			     struct swap_cluster_info *cluster_info,
-			     unsigned long *zeromap)
+			     struct swap_cluster_info *cluster_info)
 {
 	spin_lock(&swap_lock);
 	spin_lock(&si->lock);
-	setup_swap_info(si, prio, swap_map, cluster_info, zeromap);
+	setup_swap_info(si, prio, swap_map, cluster_info);
 	spin_unlock(&si->lock);
 	spin_unlock(&swap_lock);
 	/*
@@ -2370,7 +2367,7 @@ static void reinsert_swap_info(struct swap_info_struct *si)
 {
 	spin_lock(&swap_lock);
 	spin_lock(&si->lock);
-	setup_swap_info(si, si->prio, si->swap_map, si->cluster_info, si->zeromap);
+	setup_swap_info(si, si->prio, si->swap_map, si->cluster_info);
 	_enable_swap_info(si);
 	spin_unlock(&si->lock);
 	spin_unlock(&swap_lock);
@@ -2441,7 +2438,6 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
 {
 	struct swap_info_struct *p = NULL;
 	unsigned char *swap_map;
-	unsigned long *zeromap;
 	struct swap_cluster_info *cluster_info;
 	struct file *swap_file, *victim;
 	struct address_space *mapping;
@@ -2536,8 +2532,6 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
 	p->swap_file = NULL;
 	swap_map = p->swap_map;
 	p->swap_map = NULL;
-	zeromap = p->zeromap;
-	p->zeromap = NULL;
 	maxpages = p->max;
 	cluster_info = p->cluster_info;
 	p->max = 0;
@@ -2549,7 +2543,6 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
 	kfree(p->global_cluster);
 	p->global_cluster = NULL;
 	vfree(swap_map);
-	kvfree(zeromap);
 	free_cluster_info(cluster_info, maxpages);
 
 	inode = mapping->host;
@@ -3013,7 +3006,6 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 	sector_t span;
 	unsigned long maxpages;
 	unsigned char *swap_map = NULL;
-	unsigned long *zeromap = NULL;
 	struct swap_cluster_info *cluster_info = NULL;
 	struct folio *folio = NULL;
 	struct inode *inode = NULL;
@@ -3119,17 +3111,6 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 	if (error)
 		goto bad_swap_unlock_inode;
 
-	/*
-	 * Use kvmalloc_array instead of bitmap_zalloc as the allocation order might
-	 * be above MAX_PAGE_ORDER incase of a large swap file.
-	 */
-	zeromap = kvmalloc_array(BITS_TO_LONGS(maxpages), sizeof(long),
-				 GFP_KERNEL | __GFP_ZERO);
-	if (!zeromap) {
-		error = -ENOMEM;
-		goto bad_swap_unlock_inode;
-	}
-
 	if (si->bdev && bdev_stable_writes(si->bdev))
 		si->flags |= SWP_STABLE_WRITES;
 
@@ -3196,7 +3177,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 	prio = DEF_SWAP_PRIO;
 	if (swap_flags & SWAP_FLAG_PREFER)
 		prio = swap_flags & SWAP_FLAG_PRIO_MASK;
-	enable_swap_info(si, prio, swap_map, cluster_info, zeromap);
+	enable_swap_info(si, prio, swap_map, cluster_info);
 
 	pr_info("Adding %uk swap on %s. Priority:%d extents:%d across:%lluk %s%s%s%s\n",
 		K(si->pages), name->name, si->prio, nr_extents,
@@ -3224,7 +3205,6 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 	si->flags = 0;
 	spin_unlock(&swap_lock);
 	vfree(swap_map);
-	kvfree(zeromap);
 	if (cluster_info)
 		free_cluster_info(cluster_info, maxpages);
 	if (inced_nr_rotate_swap)
-- 
2.47.3
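Note for readers who have not followed the zeromap code: below is a minimal
userspace sketch, not kernel code and not part of this patch, contrasting the
two bookkeeping schemes the commit message refers to. The old scheme keeps a
per-swapfile bitmap with one bit per swap slot; the alternative records
"zero-filled" as a property of the individual swap entry, which is roughly
what delegating zero entries to a separate backend allows. All names here
(swap_device, zeromap_set, BACKING_ZERO, ...) are hypothetical and do not
correspond to kernel symbols or to the virtual swap series.

/*
 * Illustrative userspace sketch only.  Contrasts a per-device zeromap
 * bitmap (one bit per swap slot) with per-entry tracking of zero-filled
 * pages.  All names are made up for illustration.
 */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(n)	(((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* Old scheme: the swap device owns a bitmap indexed by slot offset. */
struct swap_device {
	unsigned long maxpages;
	unsigned long *zeromap;		/* 1 bit per slot */
};

static void zeromap_set(struct swap_device *dev, unsigned long off, bool zero)
{
	unsigned long *word = &dev->zeromap[off / BITS_PER_LONG];
	unsigned long mask = 1UL << (off % BITS_PER_LONG);

	if (zero)
		*word |= mask;
	else
		*word &= ~mask;
}

static bool zeromap_test(const struct swap_device *dev, unsigned long off)
{
	return (dev->zeromap[off / BITS_PER_LONG] >> (off % BITS_PER_LONG)) & 1;
}

/* Alternative scheme (conceptually): the entry itself says what backs it. */
enum entry_backing { BACKING_SWAPFILE, BACKING_ZERO };

struct swap_entry_desc {
	enum entry_backing backing;
	unsigned long offset;		/* only meaningful for BACKING_SWAPFILE */
};

int main(void)
{
	struct swap_device dev = { .maxpages = 1UL << 20 };

	/* Old: one bit per slot, allocated up front for the whole device. */
	dev.zeromap = calloc(BITS_TO_LONGS(dev.maxpages), sizeof(long));
	if (!dev.zeromap)
		return 1;
	zeromap_set(&dev, 42, true);
	printf("slot 42 zero? %d\n", zeromap_test(&dev, 42));
	printf("bitmap cost: %lu bytes for %lu slots\n",
	       (unsigned long)(BITS_TO_LONGS(dev.maxpages) * sizeof(long)),
	       dev.maxpages);

	/* Alternative: a zero-filled page never touches the device at all. */
	struct swap_entry_desc zero_entry = { .backing = BACKING_ZERO };
	printf("entry backed by zero backend? %d\n",
	       zero_entry.backing == BACKING_ZERO);

	free(dev.zeromap);
	return 0;
}

The contrast is the memory accounting mentioned in the commit message: the
bitmap costs one bit per slot for the whole device regardless of how many
zero-filled pages actually exist, while recording the backing per entry adds
nothing on the swap device side.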