When a folio's reference count is decremented but doesn't reach zero, wake
up any waiters that might be waiting for the refcount to drop. This enables
migration code to wait for transient references to be released instead of
busy-retrying.

The wake_up_var() calls are gated behind a static key that is disabled by
default, so folio_put() compiles to a NOP on the wakeup path when no
migration is waiting. The static key is enabled by the migration code in a
subsequent commit.

Signed-off-by: John Hubbard <jhubbard@nvidia.com>
---
 include/linux/mm.h | 8 ++++++++
 mm/swap.c          | 10 +++++++++-
 2 files changed, 17 insertions(+), 1 deletion(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index abb4963c1f06..ccb723412c07 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -38,6 +38,8 @@
 #include <linux/kasan.h>
 #include <linux/memremap.h>
 #include <linux/slab.h>
+#include <linux/jump_label.h>
+#include <linux/wait_bit.h>
 
 struct mempolicy;
 struct anon_vma;
@@ -1798,6 +1800,8 @@ static inline __must_check bool try_get_page(struct page *page)
 	return true;
 }
 
+DECLARE_STATIC_KEY_FALSE(folio_put_wakeup_key);
+
 /**
  * folio_put - Decrement the reference count on a folio.
  * @folio: The folio.
@@ -1815,6 +1819,8 @@ static inline void folio_put(struct folio *folio)
 {
 	if (folio_put_testzero(folio))
 		__folio_put(folio);
+	else if (static_branch_unlikely(&folio_put_wakeup_key))
+		wake_up_var(&folio->_refcount);
 }
 
 /**
@@ -1835,6 +1841,8 @@ static inline void folio_put_refs(struct folio *folio, int refs)
 {
 	if (folio_ref_sub_and_test(folio, refs))
 		__folio_put(folio);
+	else if (static_branch_unlikely(&folio_put_wakeup_key))
+		wake_up_var(&folio->_refcount);
 }
 
 void folios_put_refs(struct folio_batch *folios, unsigned int *refs);
diff --git a/mm/swap.c b/mm/swap.c
index bb19ccbece46..e57baa40129c 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -43,6 +43,9 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/pagemap.h>
 
+DEFINE_STATIC_KEY_FALSE(folio_put_wakeup_key);
+EXPORT_SYMBOL(folio_put_wakeup_key);
+
 /* How many pages do we try to swap or page in/out together?
    As a power of 2 */
 int page_cluster;
 static const int page_cluster_max = 31;
@@ -968,11 +971,16 @@ void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
 			}
 			if (folio_ref_sub_and_test(folio, nr_refs))
 				free_zone_device_folio(folio);
+			else if (static_branch_unlikely(&folio_put_wakeup_key))
+				wake_up_var(&folio->_refcount);
 			continue;
 		}
 
-		if (!folio_ref_sub_and_test(folio, nr_refs))
+		if (!folio_ref_sub_and_test(folio, nr_refs)) {
+			if (static_branch_unlikely(&folio_put_wakeup_key))
+				wake_up_var(&folio->_refcount);
 			continue;
+		}
 
 		/* hugetlb has its own memcg */
 		if (folio_test_hugetlb(folio)) {
-- 
2.53.0