The topup_memcache field can be easily replaced by the equivalent
conditions, and the resulting code is not much worse.

Tested-by: Fuad Tabba
Reviewed-by: Fuad Tabba
Reviewed-by: Suzuki K Poulose
Signed-off-by: Marc Zyngier
---
 arch/arm64/kvm/mmu.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index e8bda71e862b2..5b05caecdbd92 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1712,7 +1712,6 @@ static short kvm_s2_resolve_vma_size(const struct kvm_s2_fault_desc *s2fd,
 
 struct kvm_s2_fault {
 	bool writable;
-	bool topup_memcache;
 	bool mte_allowed;
 	bool is_vma_cacheable;
 	bool s2_force_noncacheable;
@@ -1983,9 +1982,8 @@ static int user_mem_abort(const struct kvm_s2_fault_desc *s2fd)
 		.logging_active = logging_active,
 		.force_pte = logging_active,
 		.prot = KVM_PGTABLE_PROT_R,
-		.topup_memcache = !perm_fault || (logging_active && kvm_is_write_fault(s2fd->vcpu)),
 	};
-	void *memcache;
+	void *memcache = NULL;
 	int ret;
 
 	/*
@@ -1994,9 +1992,11 @@ static int user_mem_abort(const struct kvm_s2_fault_desc *s2fd)
 	 * only exception to this is when dirty logging is enabled at runtime
 	 * and a write fault needs to collapse a block entry into a table.
 	 */
-	ret = prepare_mmu_memcache(s2fd->vcpu, fault.topup_memcache, &memcache);
-	if (ret)
-		return ret;
+	if (!perm_fault || (logging_active && kvm_is_write_fault(s2fd->vcpu))) {
+		ret = prepare_mmu_memcache(s2fd->vcpu, true, &memcache);
+		if (ret)
+			return ret;
+	}
 
 	/*
 	 * Let's check if we will get back a huge page backed by hugetlbfs, or
-- 
2.47.3