There are two meaningless folio refcount updates for an order-0 folio in filemap_map_pages(). First, filemap_map_order0_folio() adds a folio refcount after the folio is mapped to the pte. And then, filemap_map_pages() drops the refcount grabbed by next_uptodate_folio(). We can leave the refcount unchanged in this case. With this patch, we get an 8% performance gain for the lmbench testcase 'lat_pagefault -P 1 file', with a file size of 512M. Signed-off-by: Jinjiang Tu --- mm/filemap.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/mm/filemap.c b/mm/filemap.c index 751838ef05e5..5de52deab138 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -3693,6 +3693,8 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf, } vmf->pte = old_ptep; + folio_unlock(folio); + folio_put(folio); return ret; } @@ -3705,7 +3707,7 @@ static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf, struct page *page = &folio->page; if (PageHWPoison(page)) - return ret; + goto out; /* See comment of filemap_map_folio_range() */ if (!folio_test_workingset(folio)) @@ -3717,15 +3719,19 @@ static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf, * the fault-around logic. */ if (!pte_none(ptep_get(vmf->pte))) - return ret; + goto out; if (vmf->address == addr) ret = VM_FAULT_NOPAGE; set_pte_range(vmf, folio, page, 1, addr); (*rss)++; - folio_ref_inc(folio); + folio_unlock(folio); + return ret; +out: + folio_unlock(folio); + folio_put(folio); return ret; } @@ -3783,9 +3789,6 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf, ret |= filemap_map_folio_range(vmf, folio, xas.xa_index - folio->index, addr, nr_pages, &rss, &mmap_miss); - - folio_unlock(folio); - folio_put(folio); } while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL); add_mm_counter(vma->vm_mm, folio_type, rss); pte_unmap_unlock(vmf->pte, vmf->ptl); -- 2.43.0