In preparation for supporting mTHP in the wp fault path, add an offset
parameter to copy_user_large_folio() that selects the subpage in the
source folio at which the copy starts. All existing callers pass 0, so
there is no functional change.

Signed-off-by: Vernon Yang
---
 include/linux/mm.h |  1 +
 mm/hugetlb.c       |  6 +++---
 mm/memory.c        | 11 ++++++++---
 3 files changed, 12 insertions(+), 6 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 80c6673f419e..e178fb1049f7 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -4006,6 +4006,7 @@ enum mf_action_page_type {
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
 void folio_zero_user(struct folio *folio, unsigned long addr_hint);
 int copy_user_large_folio(struct folio *dst, struct folio *src,
+			  unsigned int offset,
 			  unsigned long addr_hint,
 			  struct vm_area_struct *vma);
 long copy_folio_from_user(struct folio *dst_folio,
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a0d285d20992..91e1ec73f092 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5682,7 +5682,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 				break;
 			}
 			ret = copy_user_large_folio(new_folio, pte_folio,
-						    addr, dst_vma);
+						    0, addr, dst_vma);
 			folio_put(pte_folio);
 			if (ret) {
 				folio_put(new_folio);
@@ -6277,7 +6277,7 @@ static vm_fault_t hugetlb_wp(struct folio *pagecache_folio,
 	if (unlikely(ret))
 		goto out_release_all;
 
-	if (copy_user_large_folio(new_folio, old_folio, vmf->real_address, vma)) {
+	if (copy_user_large_folio(new_folio, old_folio, 0, vmf->real_address, vma)) {
 		ret = VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(hstate_index(h));
 		goto out_release_all;
 	}
@@ -6992,7 +6992,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 			*foliop = NULL;
 			goto out;
 		}
-		ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma);
+		ret = copy_user_large_folio(folio, *foliop, 0, dst_addr, dst_vma);
 		folio_put(*foliop);
 		*foliop = NULL;
 		if (ret) {
diff --git a/mm/memory.c b/mm/memory.c
index 7b8c7d0f9ff4..3451e6e5aabd 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -7071,6 +7071,7 @@ void folio_zero_user(struct folio *folio, unsigned long addr_hint)
 }
 
 static int copy_user_gigantic_page(struct folio *dst, struct folio *src,
+				   unsigned int offset,
 				   unsigned long addr_hint,
 				   struct vm_area_struct *vma,
 				   unsigned int nr_pages)
@@ -7082,7 +7083,7 @@ static int copy_user_gigantic_page(struct folio *dst, struct folio *src,
 
 	for (i = 0; i < nr_pages; i++) {
 		dst_page = folio_page(dst, i);
-		src_page = folio_page(src, i);
+		src_page = folio_page(src, offset + i);
 
 		cond_resched();
 		if (copy_mc_user_highpage(dst_page, src_page,
@@ -7095,6 +7096,7 @@ static int copy_user_gigantic_page(struct folio *dst, struct folio *src,
 struct copy_subpage_arg {
 	struct folio *dst;
 	struct folio *src;
+	unsigned int offset;
 	struct vm_area_struct *vma;
 };
 
@@ -7102,7 +7104,7 @@ static int copy_subpage(unsigned long addr, int idx, void *arg)
 {
 	struct copy_subpage_arg *copy_arg = arg;
 	struct page *dst = folio_page(copy_arg->dst, idx);
-	struct page *src = folio_page(copy_arg->src, idx);
+	struct page *src = folio_page(copy_arg->src, copy_arg->offset + idx);
 
 	if (copy_mc_user_highpage(dst, src, addr, copy_arg->vma))
 		return -EHWPOISON;
@@ -7110,17 +7112,20 @@ static int copy_subpage(unsigned long addr, int idx, void *arg)
 }
 
 int copy_user_large_folio(struct folio *dst, struct folio *src,
+			  unsigned int offset,
 			  unsigned long addr_hint, struct vm_area_struct *vma)
 {
 	unsigned int nr_pages = folio_nr_pages(dst);
 	struct copy_subpage_arg arg = {
 		.dst = dst,
 		.src = src,
+		.offset = offset,
 		.vma = vma,
 	};
 
 	if (unlikely(nr_pages > MAX_ORDER_NR_PAGES))
-		return copy_user_gigantic_page(dst, src, addr_hint, vma, nr_pages);
+		return copy_user_gigantic_page(dst, src, offset, addr_hint,
+					       vma, nr_pages);
 
 	return process_huge_page(addr_hint, nr_pages, copy_subpage, &arg);
 }
--
2.50.1
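A usage note for reviewers: once the wp fault path copies only the part
of a larger source folio that backs the faulting range, a caller is
expected to look roughly like the sketch below. The wrapper and its use
of folio_page_idx() are illustrative assumptions, not part of this
patch; with offset == 0 the call behaves exactly as before.

	/*
	 * Illustrative sketch only: copy folio_nr_pages(dst) subpages of
	 * @src into @dst, starting at the subpage backing @fault_page.
	 */
	static int wp_copy_partial_folio(struct folio *dst, struct folio *src,
					 struct page *fault_page,
					 unsigned long addr_hint,
					 struct vm_area_struct *vma)
	{
		/* Index of the faulting page within the source folio. */
		unsigned int offset = folio_page_idx(src, fault_page);

		return copy_user_large_folio(dst, src, offset, addr_hint, vma);
	}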