Instead of passing pointers to adjust_nested_*_perms(), allow them to return a new set of permissions. With some careful moving around so that the canonical permissions are computed before the nested ones are applied, we end up with a bit less code, and something a bit more readable. Tested-by: Fuad Tabba Reviewed-by: Fuad Tabba Reviewed-by: Suzuki K Poulose Signed-off-by: Marc Zyngier --- arch/arm64/kvm/mmu.c | 62 +++++++++++++++++++------------------------- 1 file changed, 27 insertions(+), 35 deletions(-) diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 0fbdac77b1140..f4c8f72642e02 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -1544,25 +1544,27 @@ static int prepare_mmu_memcache(struct kvm_vcpu *vcpu, bool topup_memcache, * TLB invalidation from the guest and used to limit the invalidation scope if a * TTL hint or a range isn't provided. */ -static void adjust_nested_fault_perms(struct kvm_s2_trans *nested, - enum kvm_pgtable_prot *prot, - bool *writable) +static enum kvm_pgtable_prot adjust_nested_fault_perms(struct kvm_s2_trans *nested, + enum kvm_pgtable_prot prot) { - *writable &= kvm_s2_trans_writable(nested); + if (!kvm_s2_trans_writable(nested)) + prot &= ~KVM_PGTABLE_PROT_W; if (!kvm_s2_trans_readable(nested)) - *prot &= ~KVM_PGTABLE_PROT_R; + prot &= ~KVM_PGTABLE_PROT_R; - *prot |= kvm_encode_nested_level(nested); + return prot | kvm_encode_nested_level(nested); } -static void adjust_nested_exec_perms(struct kvm *kvm, - struct kvm_s2_trans *nested, - enum kvm_pgtable_prot *prot) +static enum kvm_pgtable_prot adjust_nested_exec_perms(struct kvm *kvm, + struct kvm_s2_trans *nested, + enum kvm_pgtable_prot prot) { if (!kvm_s2_trans_exec_el0(kvm, nested)) - *prot &= ~KVM_PGTABLE_PROT_UX; + prot &= ~KVM_PGTABLE_PROT_UX; if (!kvm_s2_trans_exec_el1(kvm, nested)) - *prot &= ~KVM_PGTABLE_PROT_PX; + prot &= ~KVM_PGTABLE_PROT_PX; + + return prot; } struct kvm_s2_fault_desc { @@ -1577,7 +1579,7 @@ static int gmem_abort(struct 
kvm_vcpu *vcpu, phys_addr_t fault_ipa, struct kvm_s2_trans *nested, struct kvm_memory_slot *memslot, bool is_perm) { - bool write_fault, exec_fault, writable; + bool write_fault, exec_fault; enum kvm_pgtable_walk_flags flags = KVM_PGTABLE_WALK_SHARED; enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R; struct kvm_pgtable *pgt = vcpu->arch.hw_mmu->pgt; @@ -1614,19 +1616,17 @@ static int gmem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, return ret; } - writable = !(memslot->flags & KVM_MEM_READONLY); + if (!(memslot->flags & KVM_MEM_READONLY)) + prot |= KVM_PGTABLE_PROT_W; if (nested) - adjust_nested_fault_perms(nested, &prot, &writable); - - if (writable) - prot |= KVM_PGTABLE_PROT_W; + prot = adjust_nested_fault_perms(nested, prot); if (exec_fault || cpus_have_final_cap(ARM64_HAS_CACHE_DIC)) prot |= KVM_PGTABLE_PROT_X; if (nested) - adjust_nested_exec_perms(kvm, nested, &prot); + prot = adjust_nested_exec_perms(kvm, nested, prot); kvm_fault_lock(kvm); if (mmu_invalidate_retry(kvm, mmu_seq)) { @@ -1639,10 +1639,10 @@ static int gmem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, memcache, flags); out_unlock: - kvm_release_faultin_page(kvm, page, !!ret, writable); + kvm_release_faultin_page(kvm, page, !!ret, prot & KVM_PGTABLE_PROT_W); kvm_fault_unlock(kvm); - if (writable && !ret) + if ((prot & KVM_PGTABLE_PROT_W) && !ret) mark_page_dirty_in_slot(kvm, memslot, gfn); return ret != -EAGAIN ? ret : 0; @@ -1856,16 +1856,6 @@ static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd, enum kvm_pgtable_prot *prot) { struct kvm *kvm = s2fd->vcpu->kvm; - bool writable = s2vi->map_writable; - - if (!s2vi->device && memslot_is_logging(s2fd->memslot) && - !kvm_is_write_fault(s2fd->vcpu)) { - /* - * Only actually map the page as writable if this was a write - * fault. 
- */ - writable = false; - } if (kvm_vcpu_trap_is_exec_fault(s2fd->vcpu) && s2vi->map_non_cacheable) return -ENOEXEC; @@ -1883,12 +1873,14 @@ static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd, *prot = KVM_PGTABLE_PROT_R; - if (s2fd->nested) - adjust_nested_fault_perms(s2fd->nested, prot, &writable); - - if (writable) + if (s2vi->map_writable && (s2vi->device || + !memslot_is_logging(s2fd->memslot) || + kvm_is_write_fault(s2fd->vcpu))) *prot |= KVM_PGTABLE_PROT_W; + if (s2fd->nested) + *prot = adjust_nested_fault_perms(s2fd->nested, *prot); + if (kvm_vcpu_trap_is_exec_fault(s2fd->vcpu)) *prot |= KVM_PGTABLE_PROT_X; @@ -1899,7 +1891,7 @@ static int kvm_s2_fault_compute_prot(const struct kvm_s2_fault_desc *s2fd, *prot |= KVM_PGTABLE_PROT_X; if (s2fd->nested) - adjust_nested_exec_perms(kvm, s2fd->nested, prot); + *prot = adjust_nested_exec_perms(kvm, s2fd->nested, *prot); if (!kvm_s2_fault_is_perm(s2fd) && !s2vi->map_non_cacheable && kvm_has_mte(kvm)) { /* Check the VMM hasn't introduced a new disallowed VMA */ -- 2.47.3