Pure code movement, in preparation for using these macros for VMX as well. Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/vmenter.S | 62 ---------------------------------- arch/x86/kvm/vmenter.h | 68 +++++++++++++++++++++++++++++++++++++- 2 files changed, 67 insertions(+), 63 deletions(-) diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S index 545dc55582bf..67a05fe9827e 100644 --- a/arch/x86/kvm/svm/vmenter.S +++ b/arch/x86/kvm/svm/vmenter.S @@ -41,35 +41,6 @@ "jmp 800f", X86_FEATURE_MSR_SPEC_CTRL, \ "", X86_FEATURE_V_SPEC_CTRL .endm -.macro RESTORE_GUEST_SPEC_CTRL_BODY guest_spec_ctrl:req, label:req - /* - * SPEC_CTRL handling: if the guest's SPEC_CTRL value differs from the - * host's, write the MSR. This is kept out-of-line so that the common - * case does not have to jump. - * - * IMPORTANT: To avoid RSB underflow attacks and any other nastiness, - * there must not be any returns or indirect branches between this code - * and vmentry. - */ -#ifdef CONFIG_X86_64 - mov \guest_spec_ctrl, %rdx - cmp PER_CPU_VAR(x86_spec_ctrl_current), %rdx - je \label - movl %edx, %eax - shr $32, %rdx -#else - mov \guest_spec_ctrl, %eax - mov PER_CPU_VAR(x86_spec_ctrl_current), %ecx - xor %eax, %ecx - mov 4 + \guest_spec_ctrl, %edx - mov PER_CPU_VAR(x86_spec_ctrl_current + 4), %esi - xor %edx, %esi - or %esi, %ecx - je \label -#endif - mov $MSR_IA32_SPEC_CTRL, %ecx - wrmsr -.endm .macro RESTORE_HOST_SPEC_CTRL /* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */ @@ -77,39 +48,6 @@ "jmp 900f", X86_FEATURE_MSR_SPEC_CTRL, \ "", X86_FEATURE_V_SPEC_CTRL .endm -.macro RESTORE_HOST_SPEC_CTRL_BODY guest_spec_ctrl:req, enter_flags:req, label:req - /* Same for after vmexit. */ - mov $MSR_IA32_SPEC_CTRL, %ecx - - /* - * Load the value that the guest had written into MSR_IA32_SPEC_CTRL, - * if it was not intercepted during guest execution. 
- */ - testl $KVM_ENTER_SAVE_SPEC_CTRL, \enter_flags - jnz 998f - rdmsr - movl %eax, \guest_spec_ctrl - movl %edx, 4 + \guest_spec_ctrl -998: - /* Now restore the host value of the MSR if different from the guest's. */ -#ifdef CONFIG_X86_64 - mov PER_CPU_VAR(x86_spec_ctrl_current), %rdx - cmp \guest_spec_ctrl, %rdx - je \label - movl %edx, %eax - shr $32, %rdx -#else - mov PER_CPU_VAR(x86_spec_ctrl_current), %eax - mov \guest_spec_ctrl, %esi - xor %eax, %esi - mov PER_CPU_VAR(x86_spec_ctrl_current + 4), %edx - mov 4 + \guest_spec_ctrl, %edi - xor %edx, %edi - or %edi, %esi - je \label -#endif - wrmsr -.endm #define SVM_CLEAR_CPU_BUFFERS \ ALTERNATIVE "", __CLEAR_CPU_BUFFERS, X86_FEATURE_CLEAR_CPU_BUF_VM diff --git a/arch/x86/kvm/vmenter.h b/arch/x86/kvm/vmenter.h index 29cdae650069..e746e1328d3f 100644 --- a/arch/x86/kvm/vmenter.h +++ b/arch/x86/kvm/vmenter.h @@ -6,4 +6,70 @@ #define KVM_ENTER_SAVE_SPEC_CTRL BIT(1) #define KVM_ENTER_CLEAR_CPU_BUFFERS_FOR_MMIO BIT(2) -#endif /* __KVM_X86_VMENTER_H */ +#ifdef __ASSEMBLER__ +.macro RESTORE_GUEST_SPEC_CTRL_BODY guest_spec_ctrl:req, label:req + /* + * SPEC_CTRL handling: if the guest's SPEC_CTRL value differs from the + * host's, write the MSR. This is kept out-of-line so that the common + * case does not have to jump. + * + * IMPORTANT: To avoid RSB underflow attacks and any other nastiness, + * there must not be any returns or indirect branches between this code + * and vmentry. + */ +#ifdef CONFIG_X86_64 + mov \guest_spec_ctrl, %rdx + cmp PER_CPU_VAR(x86_spec_ctrl_current), %rdx + je \label + movl %edx, %eax + shr $32, %rdx +#else + mov \guest_spec_ctrl, %eax + mov PER_CPU_VAR(x86_spec_ctrl_current), %ecx + xor %eax, %ecx + mov 4 + \guest_spec_ctrl, %edx + mov PER_CPU_VAR(x86_spec_ctrl_current + 4), %esi + xor %edx, %esi + or %esi, %ecx + je \label +#endif + mov $MSR_IA32_SPEC_CTRL, %ecx + wrmsr +.endm + +.macro RESTORE_HOST_SPEC_CTRL_BODY guest_spec_ctrl:req, enter_flags:req, label:req + /* Same for after vmexit. 
*/ + mov $MSR_IA32_SPEC_CTRL, %ecx + + /* + * Load the value that the guest had written into MSR_IA32_SPEC_CTRL, + * if it was not intercepted during guest execution. + */ + testl $KVM_ENTER_SAVE_SPEC_CTRL, \enter_flags + jnz 998f + rdmsr + movl %eax, \guest_spec_ctrl + movl %edx, 4 + \guest_spec_ctrl +998: + /* Now restore the host value of the MSR if different from the guest's. */ +#ifdef CONFIG_X86_64 + mov PER_CPU_VAR(x86_spec_ctrl_current), %rdx + cmp \guest_spec_ctrl, %rdx + je \label + movl %edx, %eax + shr $32, %rdx +#else + mov PER_CPU_VAR(x86_spec_ctrl_current), %eax + mov \guest_spec_ctrl, %esi + xor %eax, %esi + mov PER_CPU_VAR(x86_spec_ctrl_current + 4), %edx + mov 4 + \guest_spec_ctrl, %edi + xor %edx, %edi + or %edi, %esi + je \label +#endif + wrmsr +.endm + +#endif /* __ASSEMBLER__ */ +#endif /* __KVM_X86_VMENTER_H */ -- 2.52.0