Fake heads are no longer in use, so remove the checks for them. This
simplifies compound_head() and page_ref_add_unless() substantially.

Signed-off-by: Kiryl Shutsemau
---
 include/linux/page-flags.h | 95 ++------------------------------------
 include/linux/page_ref.h   |  8 +---
 2 files changed, 4 insertions(+), 99 deletions(-)

diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index eef02fbbb40f..8acb141a127b 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -198,104 +198,15 @@ enum pageflags {
 
 #ifndef __GENERATING_BOUNDS_H
 
-#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
 DECLARE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);
 
-/*
- * Return the real head page struct iff the @page is a fake head page, otherwise
- * return the @page itself. See Documentation/mm/vmemmap_dedup.rst.
- */
-static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
-{
-	if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
-		return page;
-
-	/*
-	 * Fake heads only exists if size of struct page is power-of-2.
-	 * See hugetlb_vmemmap_optimizable_size().
-	 */
-	if (!is_power_of_2(sizeof(struct page)))
-		return page;
-
-	/*
-	 * Only addresses aligned with PAGE_SIZE of struct page may be fake head
-	 * struct page. The alignment check aims to avoid access the fields (
-	 * e.g. compound_info) of the @page[1]. It can avoid touch a (possibly)
-	 * cold cacheline in some cases.
-	 */
-	if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
-	    test_bit(PG_head, &page->flags.f)) {
-		/*
-		 * We can safely access the field of the @page[1] with PG_head
-		 * because the @page is a compound page composed with at least
-		 * two contiguous pages.
-		 */
-		unsigned long info = READ_ONCE(page[1].compound_info);
-
-		if (likely(info & 1)) {
-			unsigned long p = (unsigned long)page;
-
-			return (const struct page *)(p & info);
-		}
-	}
-	return page;
-}
-
-static __always_inline bool page_count_writable(const struct page *page, int u)
-{
-	if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
-		return true;
-
-	/*
-	 * The refcount check is ordered before the fake-head check to prevent
-	 * the following race:
-	 *	CPU 1 (HVO)			CPU 2 (speculative PFN walker)
-	 *
-	 *	page_ref_freeze()
-	 *	synchronize_rcu()
-	 *					rcu_read_lock()
-	 *					page_is_fake_head() is false
-	 *	vmemmap_remap_pte()
-	 *	XXX: struct page[] becomes r/o
-	 *
-	 *	page_ref_unfreeze()
-	 *					page_ref_count() is not zero
-	 *
-	 *					atomic_add_unless(&page->_refcount)
-	 *					XXX: try to modify r/o struct page[]
-	 *
-	 * The refcount check also prevents modification attempts to other (r/o)
-	 * tail pages that are not fake heads.
-	 */
-	if (atomic_read_acquire(&page->_refcount) == u)
-		return false;
-
-	return page_fixed_fake_head(page) == page;
-}
-#else
-static inline const struct page *page_fixed_fake_head(const struct page *page)
-{
-	return page;
-}
-
-static inline bool page_count_writable(const struct page *page, int u)
-{
-	return true;
-}
-#endif
-
-static __always_inline int page_is_fake_head(const struct page *page)
-{
-	return page_fixed_fake_head(page) != page;
-}
-
 static __always_inline unsigned long _compound_head(const struct page *page)
 {
 	unsigned long info = READ_ONCE(page->compound_info);
 
 	/* Bit 0 encodes PageTail() */
 	if (!(info & 1))
-		return (unsigned long)page_fixed_fake_head(page);
+		return (unsigned long)page;
 
 	/*
 	 * If the size of struct page is not power-of-2, the rest if
@@ -377,7 +288,7 @@ static __always_inline void clear_compound_head(struct page *page)
 
 static __always_inline int PageTail(const struct page *page)
 {
-	return READ_ONCE(page->compound_info) & 1 || page_is_fake_head(page);
+	return READ_ONCE(page->compound_info) & 1;
 }
 
 static __always_inline int PageCompound(const struct page *page)
@@ -904,7 +815,7 @@ static __always_inline bool folio_test_head(const struct folio *folio)
 static __always_inline int PageHead(const struct page *page)
 {
 	PF_POISONED_CHECK(page);
-	return test_bit(PG_head, &page->flags.f) && !page_is_fake_head(page);
+	return test_bit(PG_head, &page->flags.f);
 }
 
 __SETPAGEFLAG(Head, head, PF_ANY)
diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h
index 544150d1d5fd..490d0ad6e56d 100644
--- a/include/linux/page_ref.h
+++ b/include/linux/page_ref.h
@@ -230,13 +230,7 @@ static inline int folio_ref_dec_return(struct folio *folio)
 
 static inline bool page_ref_add_unless(struct page *page, int nr, int u)
 {
-	bool ret = false;
-
-	rcu_read_lock();
-	/* avoid writing to the vmemmap area being remapped */
-	if (page_count_writable(page, u))
-		ret = atomic_add_unless(&page->_refcount, nr, u);
-	rcu_read_unlock();
+	bool ret = atomic_add_unless(&page->_refcount, nr, u);
 
 	if (page_ref_tracepoint_active(page_ref_mod_unless))
 		__page_ref_mod_unless(page, nr, ret);
-- 
2.51.2
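For reference, the compound_info encoding that remains after this cleanup
is the simple one visible in the hunks above: bit 0 distinguishes tail
pages, and for tails the remaining bits carry the head page pointer. Below
is a minimal userspace sketch of that decode, not kernel code: struct
mock_page and mock_compound_head() are hypothetical stand-ins for struct
page and _compound_head(), and recovering the head by masking bit 0 off is
an assumption, since the tail branch of _compound_head() is truncated in
the context above.

#include <stdint.h>
#include <stdio.h>

struct mock_page {
	uintptr_t compound_info;	/* bit 0 set => tail page */
};

static struct mock_page *mock_compound_head(struct mock_page *page)
{
	uintptr_t info = page->compound_info;

	/* Bit 0 clear: a head (or non-compound) page maps to itself. */
	if (!(info & 1))
		return page;

	/* Bit 0 set: assume the remaining bits hold the head pointer. */
	return (struct mock_page *)(info & ~(uintptr_t)1);
}

int main(void)
{
	struct mock_page pages[4];
	int i;

	pages[0].compound_info = 0;	/* head: bit 0 clear */
	for (i = 1; i < 4; i++)		/* tails: head pointer | 1 */
		pages[i].compound_info = (uintptr_t)&pages[0] | 1;

	printf("head -> head: %d\n", mock_compound_head(&pages[0]) == &pages[0]);
	printf("tail -> head: %d\n", mock_compound_head(&pages[3]) == &pages[0]);
	return 0;
}

With fake heads gone, the bit-0-clear case can return the page directly,
which is exactly why the page_fixed_fake_head() detour drops out of
_compound_head().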