Function __tlb_remove_page_size() is only called from tlb_remove_page_size(),
which always passes @delay_rmap as false, and the value is forwarded directly
to __tlb_remove_folio_pages_size().

Remove the @delay_rmap parameter from __tlb_remove_page_size() and call
__tlb_remove_folio_pages_size() with delay_rmap set to false.

Signed-off-by: Wei Yang
Cc: "David Hildenbrand (Red Hat)"
---
 arch/s390/include/asm/tlb.h | 6 ++----
 include/asm-generic/tlb.h   | 5 ++---
 mm/mmu_gather.c             | 5 ++---
 3 files changed, 6 insertions(+), 10 deletions(-)

diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 1e50f6f1ad9d..0b7b4df94b24 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -24,7 +24,7 @@
 static inline void tlb_flush(struct mmu_gather *tlb);
 static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
-		struct page *page, bool delay_rmap, int page_size);
+		struct page *page, int page_size);
 static inline bool __tlb_remove_folio_pages(struct mmu_gather *tlb,
 		struct page *page, unsigned int nr_pages, bool delay_rmap);
@@ -46,10 +46,8 @@ static inline bool __tlb_remove_folio_pages(struct mmu_gather *tlb,
  * s390 doesn't delay rmap removal.
  */
 static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
-		struct page *page, bool delay_rmap, int page_size)
+		struct page *page, int page_size)
 {
-	VM_WARN_ON_ONCE(delay_rmap);
-
 	free_folio_and_swap_cache(page_folio(page));
 	return false;
 }
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 4d679d2a206b..3975f7d11553 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -287,8 +287,7 @@ struct mmu_gather_batch {
  */
 #define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)
 
-extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
-		bool delay_rmap, int page_size);
+extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size);
 bool __tlb_remove_folio_pages(struct mmu_gather *tlb, struct page *page,
 		unsigned int nr_pages, bool delay_rmap);
@@ -510,7 +509,7 @@ static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 static inline void tlb_remove_page_size(struct mmu_gather *tlb,
 					struct page *page, int page_size)
 {
-	if (__tlb_remove_page_size(tlb, page, false, page_size))
+	if (__tlb_remove_page_size(tlb, page, page_size))
 		tlb_flush_mmu(tlb);
 }
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index 7468ec388455..2faa23d7f8d4 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -210,10 +210,9 @@ bool __tlb_remove_folio_pages(struct mmu_gather *tlb, struct page *page,
 					     PAGE_SIZE);
 }
 
-bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
-		bool delay_rmap, int page_size)
+bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
 {
-	return __tlb_remove_folio_pages_size(tlb, page, 1, delay_rmap, page_size);
+	return __tlb_remove_folio_pages_size(tlb, page, 1, false, page_size);
 }
 
 #endif /* MMU_GATHER_NO_GATHER */
-- 
2.34.1
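
The change follows a common cleanup pattern: a thin wrapper whose only caller
always supplies the same constant argument can drop that parameter and
hard-code the value at its single internal call site. For illustration only,
below is a minimal, standalone C sketch of that pattern; the types and helper
names are simplified stand-ins and not the actual kernel definitions.

	/*
	 * Standalone sketch of the pattern used by this patch, with stubbed
	 * types. Not kernel code; build with any C compiler.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	struct mmu_gather { int pending; };   /* stand-in for the real struct */
	struct page { unsigned long flags; }; /* stand-in for the real struct */

	/* Stand-in for __tlb_remove_folio_pages_size(): batches @nr_pages pages. */
	static bool remove_folio_pages_size(struct mmu_gather *tlb, struct page *page,
					    unsigned int nr_pages, bool delay_rmap,
					    int page_size)
	{
		(void)page;
		(void)delay_rmap;
		tlb->pending += nr_pages * (page_size / 4096);
		return tlb->pending >= 512;   /* "batch full, caller should flush" */
	}

	/*
	 * After the change: the single-page helper no longer takes delay_rmap and
	 * simply passes false, mirroring __tlb_remove_page_size() in mm/mmu_gather.c.
	 */
	static bool remove_page_size(struct mmu_gather *tlb, struct page *page,
				     int page_size)
	{
		return remove_folio_pages_size(tlb, page, 1, false, page_size);
	}

	int main(void)
	{
		struct mmu_gather tlb = { 0 };
		struct page page = { 0 };

		/* The caller no longer spells out the always-false argument. */
		if (remove_page_size(&tlb, &page, 4096))
			puts("batch full: flush");
		else
			printf("pending pages: %d\n", tlb.pending);
		return 0;
	}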