The current atomic-based page refcount implementation treats a zero counter
as dead and requires a compare-and-swap loop in folio_try_get() to prevent
incrementing a dead refcount. This CAS loop acts as a serialization point
and can become a significant bottleneck during high-frequency file read
operations.

This patch introduces PAGEREF_LOCKED_BIT to distinguish between a
(temporary) zero refcount and a locked (dead/frozen) state. Because
incrementing the counter no longer affects its locked/unlocked state,
page_ref_add_unless_zero() can use an optimistic atomic_add_return() that
operates independently of the locked bit. The locked state is handled after
the increment attempt, eliminating the need for the CAS loop.

Co-developed-by: Gorbunov Ivan
Signed-off-by: Gorbunov Ivan
Signed-off-by: Gladyshev Ilya
---
 include/linux/page-flags.h |  5 ++++-
 include/linux/page_ref.h   | 25 +++++++++++++++++++++----
 2 files changed, 25 insertions(+), 5 deletions(-)

diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 7c2195baf4c1..f2a9302104eb 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -196,6 +196,9 @@ enum pageflags {
 
 #define PAGEFLAGS_MASK ((1UL << NR_PAGEFLAGS) - 1)
 
+/* Most significant bit in page refcount */
+#define PAGEREF_LOCKED_BIT (1 << 31)
+
 #ifndef __GENERATING_BOUNDS_H
 
 #ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
@@ -257,7 +260,7 @@ static __always_inline bool page_count_writable(const struct page *page)
 	 * The refcount check also prevents modification attempts to other (r/o)
 	 * tail pages that are not fake heads.
 	 */
-	if (!atomic_read_acquire(&page->_refcount))
+	if (atomic_read_acquire(&page->_refcount) & PAGEREF_LOCKED_BIT)
 		return false;
 
 	return page_fixed_fake_head(page) == page;
diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h
index b0e3f4a4b4b8..98717fd25306 100644
--- a/include/linux/page_ref.h
+++ b/include/linux/page_ref.h
@@ -64,7 +64,12 @@ static inline void __page_ref_unfreeze(struct page *page, int v)
 
 static inline int page_ref_count(const struct page *page)
 {
-	return atomic_read(&page->_refcount);
+	int val = atomic_read(&page->_refcount);
+
+	if (unlikely(val & PAGEREF_LOCKED_BIT))
+		return 0;
+
+	return val;
 }
 
 /**
@@ -176,6 +181,9 @@ static inline int page_ref_sub_and_test(struct page *page, int nr)
 {
 	int ret = atomic_sub_and_test(nr, &page->_refcount);
 
+	if (ret)
+		ret = !atomic_cmpxchg_relaxed(&page->_refcount, 0, PAGEREF_LOCKED_BIT);
+
 	if (page_ref_tracepoint_active(page_ref_mod_and_test))
 		__page_ref_mod_and_test(page, -nr, ret);
 	return ret;
@@ -204,6 +212,9 @@ static inline int page_ref_dec_and_test(struct page *page)
 {
 	int ret = atomic_dec_and_test(&page->_refcount);
 
+	if (ret)
+		ret = !atomic_cmpxchg_relaxed(&page->_refcount, 0, PAGEREF_LOCKED_BIT);
+
 	if (page_ref_tracepoint_active(page_ref_mod_and_test))
 		__page_ref_mod_and_test(page, -1, ret);
 	return ret;
@@ -231,11 +242,17 @@ static inline int folio_ref_dec_return(struct folio *folio)
 static inline bool page_ref_add_unless_zero(struct page *page, int nr)
 {
 	bool ret = false;
+	int val;
 
 	rcu_read_lock();
 	/* avoid writing to the vmemmap area being remapped */
-	if (page_count_writable(page))
-		ret = atomic_add_unless(&page->_refcount, nr, 0);
+	if (page_count_writable(page)) {
+		val = atomic_add_return(nr, &page->_refcount);
+		ret = !(val & PAGEREF_LOCKED_BIT);
+
+		if (unlikely(!ret))
+			atomic_cmpxchg_relaxed(&page->_refcount, val, PAGEREF_LOCKED_BIT);
+	}
 	rcu_read_unlock();
 
 	if (page_ref_tracepoint_active(page_ref_mod_unless))
@@ -271,7 +288,7 @@ static inline bool folio_ref_try_add(struct folio *folio, int count)
 
 static inline int page_ref_freeze(struct page *page, int count)
 {
-	int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count);
+	int ret = likely(atomic_cmpxchg(&page->_refcount, count, PAGEREF_LOCKED_BIT) == count);
 
 	if (page_ref_tracepoint_active(page_ref_freeze))
 		__page_ref_freeze(page, count, ret);
-- 
2.43.0
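
P.S. For reviewers who want to poke at the scheme outside the kernel, here is
a minimal, self-contained userspace sketch of the "increment first, check the
locked bit afterwards" idea used above. It is illustrative only: LOCKED_BIT,
struct ref, ref_try_get() and ref_put() are made-up names rather than kernel
APIs, and it uses C11 <stdatomic.h> instead of the kernel's atomic_t.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Mirrors PAGEREF_LOCKED_BIT: the most significant bit marks a dead count. */
#define LOCKED_BIT (1U << 31)

struct ref {
	atomic_uint count;
};

/* Optimistic get: increment first, inspect the locked bit afterwards. */
static bool ref_try_get(struct ref *r)
{
	unsigned int val = atomic_fetch_add(&r->count, 1) + 1;

	if (!(val & LOCKED_BIT))
		return true;

	/* The object was already dead: undo our disturbance of the marker. */
	atomic_compare_exchange_strong(&r->count, &val, LOCKED_BIT);
	return false;
}

/* Put: whoever drops the count to zero marks the object dead. */
static bool ref_put(struct ref *r)
{
	if (atomic_fetch_sub(&r->count, 1) == 1) {
		unsigned int zero = 0;

		return atomic_compare_exchange_strong(&r->count, &zero,
						      LOCKED_BIT);
	}
	return false;
}

int main(void)
{
	struct ref r = { .count = 1 };

	printf("get while live:  %d\n", ref_try_get(&r)); /* 1 */
	ref_put(&r);
	printf("final put:       %d\n", ref_put(&r));     /* 1, now locked */
	printf("get after death: %d\n", ref_try_get(&r)); /* 0, refused */
	return 0;
}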