Call nested_svm_merge_msrpm() from enter_svm_guest_mode() if called from
the VMRUN path, instead of making the call in nested_svm_vmrun(). This
simplifies the flow of nested_svm_vmrun() and removes all jumps to
cleanup labels.

Signed-off-by: Yosry Ahmed
---
 arch/x86/kvm/svm/nested.c | 28 +++++++++++++---------------
 1 file changed, 13 insertions(+), 15 deletions(-)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index a48668c36a191..89830380cebc5 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -1020,6 +1020,9 @@ int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa, bool from_vmrun)
 
 	nested_svm_hv_update_vm_vp_ids(vcpu);
 
+	if (from_vmrun && !nested_svm_merge_msrpm(vcpu))
+		return -1;
+
 	return 0;
 }
 
@@ -1105,23 +1108,18 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)
 
 	svm->nested.nested_run_pending = 1;
 
-	if (enter_svm_guest_mode(vcpu, vmcb12_gpa, true))
-		goto out_exit_err;
-
-	if (nested_svm_merge_msrpm(vcpu))
-		return ret;
-
-out_exit_err:
-	svm->nested.nested_run_pending = 0;
-	svm->nmi_l1_to_l2 = false;
-	svm->soft_int_injected = false;
+	if (enter_svm_guest_mode(vcpu, vmcb12_gpa, true)) {
+		svm->nested.nested_run_pending = 0;
+		svm->nmi_l1_to_l2 = false;
+		svm->soft_int_injected = false;
 
-	svm->vmcb->control.exit_code = SVM_EXIT_ERR;
-	svm->vmcb->control.exit_code_hi = 0;
-	svm->vmcb->control.exit_info_1 = 0;
-	svm->vmcb->control.exit_info_2 = 0;
+		svm->vmcb->control.exit_code = SVM_EXIT_ERR;
+		svm->vmcb->control.exit_code_hi = 0;
+		svm->vmcb->control.exit_info_1 = 0;
+		svm->vmcb->control.exit_info_2 = 0;
 
-	nested_svm_vmexit(svm);
+		nested_svm_vmexit(svm);
+	}
 
 	return ret;
 }
-- 
2.51.2.1041.gc1ab5b90ca-goog