 * presence as @swp. If @is_zswap is not NULL, store @swp's zswap presence through it.
The result is a snapshot + * of zswap xarray presence; callers must tolerate races by rechecking under + * the lock that matters for their operation or by falling back safely. + */ +int zswap_entry_batch(swp_entry_t swp, int max_nr, bool *is_zswap) +{ + pgoff_t offset = swp_offset(swp); + bool first; + int i; + + if (zswap_never_enabled()) { + if (is_zswap) + *is_zswap = false; + return max_nr; + } + + first = !!xa_load(swap_zswap_tree(swp), offset); + if (is_zswap) + *is_zswap = first; + + for (i = 1; i < max_nr; i++) { + swp_entry_t entry = swp_entry(swp_type(swp), offset + i); + bool present; + + present = !!xa_load(swap_zswap_tree(entry), offset + i); + if (present != first) + return i; + } + + return max_nr; +} + #define zswap_pool_debug(msg, p) \ pr_debug("%s pool %s\n", msg, (p)->tfm_name) -- 2.34.1