Use the standard page table accessors, i.e. pxdp_get(), to read the value
of *pxdp instead of open-coding the dereference.

Signed-off-by: Wei Yang
---
 include/linux/pgtable.h | 2 +-
 mm/huge_memory.c        | 2 +-
 mm/memory.c             | 8 ++++----
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index b13b6f42be3c..a9efd58658bc 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1810,7 +1810,7 @@ static inline int pud_trans_unstable(pud_t *pud)
 {
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&			\
 	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
-	pud_t pudval = READ_ONCE(*pud);
+	pud_t pudval = pudp_get(pud);
 
 	if (pud_none(pudval) || pud_trans_huge(pudval))
 		return 1;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 0d2ac331ccad..dd3577e40d16 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1486,7 +1486,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
 	}
 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
 	ret = 0;
-	if (pmd_none(*vmf->pmd)) {
+	if (pmd_none(pmdp_get(vmf->pmd))) {
 		ret = check_stable_address_space(vma->vm_mm);
 		if (ret) {
 			spin_unlock(vmf->ptl);
diff --git a/mm/memory.c b/mm/memory.c
index 8933069948e5..39839bf0c3f5 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -6193,7 +6193,7 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
 {
 	pte_t entry;
 
-	if (unlikely(pmd_none(*vmf->pmd))) {
+	if (unlikely(pmd_none(pmdp_get(vmf->pmd)))) {
 		/*
 		 * Leave __pte_alloc() until later: because vm_ops->fault may
 		 * want to allocate huge page, and if we expose page table
@@ -6309,13 +6309,13 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
 	if (!vmf.pud)
 		return VM_FAULT_OOM;
 retry_pud:
-	if (pud_none(*vmf.pud) &&
+	if (pud_none(pudp_get(vmf.pud)) &&
 	    thp_vma_allowable_order(vma, vm_flags, TVA_PAGEFAULT, PUD_ORDER)) {
 		ret = create_huge_pud(&vmf);
 		if (!(ret & VM_FAULT_FALLBACK))
 			return ret;
 	} else {
-		pud_t orig_pud = *vmf.pud;
+		pud_t orig_pud = pudp_get(vmf.pud);
 
 		barrier();
 		if (pud_trans_huge(orig_pud)) {
@@ -6343,7 +6343,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
 	if (pud_trans_unstable(vmf.pud))
 		goto retry_pud;
 
-	if (pmd_none(*vmf.pmd) &&
+	if (pmd_none(pmdp_get(vmf.pmd)) &&
 	    thp_vma_allowable_order(vma, vm_flags, TVA_PAGEFAULT, PMD_ORDER)) {
 		ret = create_huge_pmd(&vmf);
 		if (ret & VM_FAULT_FALLBACK)
-- 
2.34.1
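
P.S. For reviewers unfamiliar with these accessors: the generic fallbacks
for pmdp_get()/pudp_get() live in include/linux/pgtable.h and simply wrap
READ_ONCE(), so on architectures that do not override them this conversion
is behaviour-preserving. A paraphrased sketch of the generic helpers (not
copied verbatim from any particular kernel release):

	/*
	 * Generic fallbacks; an architecture may define its own
	 * pmdp_get()/pudp_get() (e.g. to reassemble an entry that
	 * spans multiple words), which is why the accessor is
	 * preferred over a plain dereference.
	 */
	#ifndef pmdp_get
	static inline pmd_t pmdp_get(pmd_t *pmdp)
	{
		/* Tearing-free, single read of the PMD entry. */
		return READ_ONCE(*pmdp);
	}
	#endif

	#ifndef pudp_get
	static inline pud_t pudp_get(pud_t *pudp)
	{
		/* Tearing-free, single read of the PUD entry. */
		return READ_ONCE(*pudp);
	}
	#endif

Note that the pud_trans_unstable() hunk already used READ_ONCE(), so the
change there is purely cosmetic; the remaining hunks additionally gain the
READ_ONCE() semantics that the raw *vmf->pmd / *vmf.pud dereferences lacked.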