From: Nikita Kalyazin Replace set_direct_map_*_noflush with the newly available folio_zap_direct_map/folio_restore_direct_map calls, which take the folio and derive its address internally. A side effect is that even if filemap_add_folio fails, the TLB is still flushed; however, that failure path is not expected to be hot. Acked-by: David Hildenbrand (Arm) Reviewed-by: Ackerley Tng Signed-off-by: Nikita Kalyazin --- mm/secretmem.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/mm/secretmem.c b/mm/secretmem.c index fd29b33c6764..27b176af8fc4 100644 --- a/mm/secretmem.c +++ b/mm/secretmem.c @@ -53,7 +53,6 @@ static vm_fault_t secretmem_fault(struct vm_fault *vmf) struct inode *inode = file_inode(vmf->vma->vm_file); pgoff_t offset = vmf->pgoff; gfp_t gfp = vmf->gfp_mask; - unsigned long addr; struct folio *folio; vm_fault_t ret; int err; @@ -72,7 +71,7 @@ static vm_fault_t secretmem_fault(struct vm_fault *vmf) goto out; } - err = set_direct_map_invalid_noflush(folio_address(folio)); + err = folio_zap_direct_map(folio); if (err) { folio_put(folio); ret = vmf_error(err); @@ -87,7 +86,7 @@ static vm_fault_t secretmem_fault(struct vm_fault *vmf) * already happened when we marked the page invalid * which guarantees that this call won't fail */ - set_direct_map_default_noflush(folio_address(folio)); + folio_restore_direct_map(folio); folio_put(folio); if (err == -EEXIST) goto retry; @@ -95,9 +94,6 @@ static vm_fault_t secretmem_fault(struct vm_fault *vmf) ret = vmf_error(err); goto out; } - - addr = (unsigned long)folio_address(folio); - flush_tlb_kernel_range(addr, addr + PAGE_SIZE); } vmf->page = folio_file_page(folio, vmf->pgoff); -- 2.50.1