Optimize the early exit checks in kvm_s2_fault_pin_pfn by grouping all
error responses under the generic is_error_noslot_pfn check first,
avoiding unnecessary branches in the hot path.

Signed-off-by: Fuad Tabba <tabba@google.com>
---
 arch/arm64/kvm/mmu.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 2d6e749c1756..9265a7fc43f7 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1791,12 +1791,13 @@ static int kvm_s2_fault_pin_pfn(struct kvm_s2_fault *fault)
 	fault->pfn = __kvm_faultin_pfn(fault->memslot, fault->gfn,
 				       fault->write_fault ? FOLL_WRITE : 0,
 				       &fault->writable, &fault->page);
-	if (fault->pfn == KVM_PFN_ERR_HWPOISON) {
-		kvm_send_hwpoison_signal(fault->hva, __ffs(fault->vma_pagesize));
-		return 0;
-	}
-	if (is_error_noslot_pfn(fault->pfn))
+	if (unlikely(is_error_noslot_pfn(fault->pfn))) {
+		if (fault->pfn == KVM_PFN_ERR_HWPOISON) {
+			kvm_send_hwpoison_signal(fault->hva, __ffs(fault->vma_pagesize));
+			return 0;
+		}
 		return -EFAULT;
+	}
 
 	return 1;
 }
-- 
2.53.0.473.g4a7958ca14-goog