Instead of 0 and 1 let coalesced_mmio_in_range() return false or true. Signed-off-by: Juergen Gross --- virt/kvm/coalesced_mmio.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c index 375d6285475e..bc022bd45863 100644 --- a/virt/kvm/coalesced_mmio.c +++ b/virt/kvm/coalesced_mmio.c @@ -22,22 +22,22 @@ static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev) return container_of(dev, struct kvm_coalesced_mmio_dev, dev); } -static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev, - gpa_t addr, int len) +static bool coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev, + gpa_t addr, int len) { /* is it in a batchable area ? * (addr,len) is fully included in * (zone->addr, zone->size) */ if (len < 0) - return 0; + return false; if (addr + len < addr) - return 0; + return false; if (addr < dev->zone.addr) - return 0; + return false; if (addr + len > dev->zone.addr + dev->zone.size) - return 0; - return 1; + return false; + return true; } static int coalesced_mmio_write(struct kvm_vcpu *vcpu, -- 2.51.0 The err parameter of kvm_complete_insn_gp() is just a flag, so bool is the appropriate type for it. Just converting the function itself for now is fine, as the implicit conversion from int to bool is doing the right thing. Most callers will be changed later to use bool, too. As vmx_complete_emulated_msr is defined to kvm_complete_insn_gp, the same parameter modification should be applied to vt_complete_emulated_msr(), tdx_complete_emulated_msr() and svm_complete_emulated_msr(). No change of functionality intended. Signed-off-by: Juergen Gross --- arch/x86/include/asm/kvm_host.h | 4 ++-- arch/x86/kvm/svm/svm.c | 2 +- arch/x86/kvm/vmx/main.c | 2 +- arch/x86/kvm/vmx/tdx.c | 2 +- arch/x86/kvm/vmx/tdx.h | 2 +- arch/x86/kvm/x86.c | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 48598d017d6f..2b289708b56b 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1930,7 +1930,7 @@ struct kvm_x86_ops { void (*migrate_timers)(struct kvm_vcpu *vcpu); void (*recalc_intercepts)(struct kvm_vcpu *vcpu); - int (*complete_emulated_msr)(struct kvm_vcpu *vcpu, int err); + int (*complete_emulated_msr)(struct kvm_vcpu *vcpu, bool err); void (*vcpu_deliver_sipi_vector)(struct kvm_vcpu *vcpu, u8 vector); @@ -2409,7 +2409,7 @@ bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu); extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn); int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu); -int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err); +int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, bool err); void __user *__x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 9d29b2e7e855..f354cf0b6c1c 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -2777,7 +2777,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return 0; } -static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err) +static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, bool err) { struct vcpu_svm *svm = to_svm(vcpu); if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->sev_es.ghcb)) diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c index 0eb2773b2ae2..2f1b9a75fe47 100644 --- a/arch/x86/kvm/vmx/main.c +++ 
b/arch/x86/kvm/vmx/main.c @@ -202,7 +202,7 @@ static void vt_recalc_intercepts(struct kvm_vcpu *vcpu) vmx_recalc_intercepts(vcpu); } -static int vt_complete_emulated_msr(struct kvm_vcpu *vcpu, int err) +static int vt_complete_emulated_msr(struct kvm_vcpu *vcpu, bool err) { if (is_td_vcpu(vcpu)) return tdx_complete_emulated_msr(vcpu, err); diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c index 0a49c863c811..6b99c8dbd8cc 100644 --- a/arch/x86/kvm/vmx/tdx.c +++ b/arch/x86/kvm/vmx/tdx.c @@ -2028,7 +2028,7 @@ static int tdx_handle_ept_violation(struct kvm_vcpu *vcpu) return ret; } -int tdx_complete_emulated_msr(struct kvm_vcpu *vcpu, int err) +int tdx_complete_emulated_msr(struct kvm_vcpu *vcpu, bool err) { if (err) { tdvmcall_set_return_code(vcpu, TDVMCALL_STATUS_INVALID_OPERAND); diff --git a/arch/x86/kvm/vmx/tdx.h b/arch/x86/kvm/vmx/tdx.h index ca39a9391db1..19a066868d45 100644 --- a/arch/x86/kvm/vmx/tdx.h +++ b/arch/x86/kvm/vmx/tdx.h @@ -174,7 +174,7 @@ static __always_inline void td_##lclass##_clearbit##bits(struct vcpu_tdx *tdx, \ bool tdx_interrupt_allowed(struct kvm_vcpu *vcpu); -int tdx_complete_emulated_msr(struct kvm_vcpu *vcpu, int err); +int tdx_complete_emulated_msr(struct kvm_vcpu *vcpu, bool err); TDX_BUILD_TDVPS_ACCESSORS(16, VMCS, vmcs); TDX_BUILD_TDVPS_ACCESSORS(32, VMCS, vmcs); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c9c2aa6f4705..f7c84d9ea9de 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -950,7 +950,7 @@ void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned int nr, } EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_requeue_exception); -int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err) +int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, bool err) { if (err) kvm_inject_gp(vcpu, 0); -- 2.51.0 Today x86_emulate_ops.set_cr() is returning "0" for okay and "1" in case of an error. In order to get rid of the literal numbers, use bool as return type for x86_emulate_ops.set_cr() and related sub-functions. Note that in case an illegal cr is specified, emulator_set_cr() will return -1. As all callers are either ignoring the return value or are testing only for 0, returning just true for that case is fine. No change of functionality intended.
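A minimal sketch of the caller pattern referred to above (illustration only, not part of this patch; the hypothetical function mirrors the em_dr_write() pattern touched later in this series): the result is only ever tested for "not 0", so returning true instead of -1 for an illegal cr behaves the same:

	/* Hypothetical caller, for illustration only. */
	static int example_cr_write(struct x86_emulate_ctxt *ctxt)
	{
		/* Any "true" result is treated as an error and injects #GP. */
		if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
			return emulate_gp(ctxt, 0);

		return X86EMUL_CONTINUE;
	}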
Signed-off-by: Juergen Gross --- arch/x86/include/asm/kvm_host.h | 8 ++--- arch/x86/kvm/kvm_emulate.h | 2 +- arch/x86/kvm/smm.c | 2 +- arch/x86/kvm/svm/svm.c | 6 ++-- arch/x86/kvm/vmx/vmx.c | 14 ++++----- arch/x86/kvm/x86.c | 54 ++++++++++++++++----------------- 6 files changed, 43 insertions(+), 43 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 2b289708b56b..7997ed36de38 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -2203,10 +2203,10 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index, void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0); void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4); -int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0); -int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3); -int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); -int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8); +bool kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0); +bool kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3); +bool kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); +bool kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8); int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val); unsigned long kvm_get_dr(struct kvm_vcpu *vcpu, int dr); unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h index 7b5ddb787a25..efc64396d717 100644 --- a/arch/x86/kvm/kvm_emulate.h +++ b/arch/x86/kvm/kvm_emulate.h @@ -209,7 +209,7 @@ struct x86_emulate_ops { void (*set_gdt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt); void (*set_idt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt); ulong (*get_cr)(struct x86_emulate_ctxt *ctxt, int cr); - int (*set_cr)(struct x86_emulate_ctxt *ctxt, int cr, ulong val); + bool (*set_cr)(struct x86_emulate_ctxt *ctxt, int cr, ulong val); int (*cpl)(struct x86_emulate_ctxt *ctxt); ulong (*get_dr)(struct x86_emulate_ctxt *ctxt, int dr); int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value); diff --git a/arch/x86/kvm/smm.c b/arch/x86/kvm/smm.c index f623c5986119..87af91f4f9f2 100644 --- a/arch/x86/kvm/smm.c +++ b/arch/x86/kvm/smm.c @@ -419,7 +419,7 @@ static int rsm_load_seg_64(struct kvm_vcpu *vcpu, static int rsm_enter_protected_mode(struct kvm_vcpu *vcpu, u64 cr0, u64 cr3, u64 cr4) { - int bad; + bool bad; u64 pcid; /* In order to later set CR4.PCIDE, CR3[11:0] must be zero. 
*/ diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index f354cf0b6c1c..98bd087dda9c 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -2448,7 +2448,7 @@ static int cr_interception(struct kvm_vcpu *vcpu) struct vcpu_svm *svm = to_svm(vcpu); int reg, cr; unsigned long val; - int err; + bool err; if (!static_cpu_has(X86_FEATURE_DECODEASSISTS)) return emulate_on_interception(vcpu); @@ -2462,7 +2462,7 @@ static int cr_interception(struct kvm_vcpu *vcpu) else cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0; - err = 0; + err = false; if (cr >= 16) { /* mov to cr */ cr -= 16; val = kvm_register_read(vcpu, reg); @@ -2522,7 +2522,7 @@ static int cr_trap(struct kvm_vcpu *vcpu) struct vcpu_svm *svm = to_svm(vcpu); unsigned long old_value, new_value; unsigned int cr; - int ret = 0; + bool ret = false; new_value = (unsigned long)svm->vmcb->control.exit_info_1; diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 91b6f2f3edc2..ff0c684d9c28 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -5394,7 +5394,7 @@ void vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) } /* called to set cr0 as appropriate for a mov-to-cr0 exit. */ -static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val) +static bool handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val) { if (is_guest_mode(vcpu)) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); @@ -5412,15 +5412,15 @@ static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val) (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask); if (kvm_set_cr0(vcpu, val)) - return 1; + return true; vmcs_writel(CR0_READ_SHADOW, orig_val); - return 0; + return false; } else { return kvm_set_cr0(vcpu, val); } } -static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val) +static bool handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val) { if (is_guest_mode(vcpu)) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); @@ -5430,9 +5430,9 @@ static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val) val = (val & ~vmcs12->cr4_guest_host_mask) | (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask); if (kvm_set_cr4(vcpu, val)) - return 1; + return true; vmcs_writel(CR4_READ_SHADOW, orig_val); - return 0; + return false; } else return kvm_set_cr4(vcpu, val); } @@ -5454,7 +5454,7 @@ static int handle_cr(struct kvm_vcpu *vcpu) unsigned long exit_qualification, val; int cr; int reg; - int err; + bool err; int ret; exit_qualification = vmx_get_exit_qual(vcpu); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index f7c84d9ea9de..462954633c1d 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1153,12 +1153,12 @@ void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned lon } EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_post_set_cr0); -int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) +bool kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) { unsigned long old_cr0 = kvm_read_cr0(vcpu); if (!kvm_is_valid_cr0(vcpu, cr0)) - return 1; + return true; cr0 |= X86_CR0_ET; @@ -1171,29 +1171,29 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) int cs_db, cs_l; if (!is_pae(vcpu)) - return 1; + return true; kvm_x86_call(get_cs_db_l_bits)(vcpu, &cs_db, &cs_l); if (cs_l) - return 1; + return true; } #endif if (!(vcpu->arch.efer & EFER_LME) && (cr0 & X86_CR0_PG) && is_pae(vcpu) && ((cr0 ^ old_cr0) & X86_CR0_PDPTR_BITS) && !load_pdptrs(vcpu, kvm_read_cr3(vcpu))) - return 1; + return true; if (!(cr0 & X86_CR0_PG) && (is_64_bit_mode(vcpu) || kvm_is_cr4_bit_set(vcpu, 
X86_CR4_PCIDE))) - return 1; + return true; if (!(cr0 & X86_CR0_WP) && kvm_is_cr4_bit_set(vcpu, X86_CR4_CET)) - return 1; + return true; kvm_x86_call(set_cr0)(vcpu, cr0); kvm_post_set_cr0(vcpu, old_cr0, cr0); - return 0; + return false; } EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_cr0); @@ -1366,37 +1366,37 @@ void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned lon } EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_post_set_cr4); -int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) +bool kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { unsigned long old_cr4 = kvm_read_cr4(vcpu); if (!kvm_is_valid_cr4(vcpu, cr4)) - return 1; + return true; if (is_long_mode(vcpu)) { if (!(cr4 & X86_CR4_PAE)) - return 1; + return true; if ((cr4 ^ old_cr4) & X86_CR4_LA57) - return 1; + return true; } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE) && ((cr4 ^ old_cr4) & X86_CR4_PDPTR_BITS) && !load_pdptrs(vcpu, kvm_read_cr3(vcpu))) - return 1; + return true; if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) { /* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */ if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu)) - return 1; + return true; } if ((cr4 & X86_CR4_CET) && !kvm_is_cr0_bit_set(vcpu, X86_CR0_WP)) - return 1; + return true; kvm_x86_call(set_cr4)(vcpu, cr4); kvm_post_set_cr4(vcpu, old_cr4, cr4); - return 0; + return false; } EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_cr4); @@ -1443,7 +1443,7 @@ static void kvm_invalidate_pcid(struct kvm_vcpu *vcpu, unsigned long pcid) kvm_mmu_free_roots(vcpu->kvm, mmu, roots_to_free); } -int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) +bool kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) { bool skip_tlb_flush = false; unsigned long pcid = 0; @@ -1465,10 +1465,10 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) * the current vCPU mode is accurate. */ if (!kvm_vcpu_is_legal_cr3(vcpu, cr3)) - return 1; + return true; if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, cr3)) - return 1; + return true; if (cr3 != kvm_read_cr3(vcpu)) kvm_mmu_new_pgd(vcpu, cr3); @@ -1488,19 +1488,19 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) if (!skip_tlb_flush) kvm_invalidate_pcid(vcpu, pcid); - return 0; + return false; } EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_cr3); -int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8) +bool kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8) { if (cr8 & CR8_RESERVED_BITS) - return 1; + return true; if (lapic_in_kernel(vcpu)) kvm_lapic_set_tpr(vcpu, cr8); else vcpu->arch.cr8 = cr8; - return 0; + return false; } EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_cr8); @@ -8571,10 +8571,10 @@ static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr) return value; } -static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val) +static bool emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); - int res = 0; + bool res = false; switch (cr) { case 0: @@ -8594,7 +8594,7 @@ static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val) break; default: kvm_err("%s: unexpected cr %u\n", __func__, cr); - res = -1; + res = true; } return res; @@ -11912,7 +11912,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) /* re-sync apic's tpr */ if (!lapic_in_kernel(vcpu)) { - if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) { + if (kvm_set_cr8(vcpu, kvm_run->cr8)) { r = -EINVAL; goto out; } -- 2.51.0 Today x86_emulate_ops.set_dr() is returning "0" for okay and "1" in case of an error. 
In order to get rid of the literal numbers, use bool as return type for x86_emulate_ops.set_dr() and related sub-functions. As x86_emulate_ops.set_dr() only ever returns 0 or 1, the test for its return value less than 0 in em_dr_write() seems to be a bug. It should probably be just a test for not 0. No change of functionality intended. Signed-off-by: Juergen Gross --- arch/x86/include/asm/kvm_host.h | 2 +- arch/x86/kvm/emulate.c | 2 +- arch/x86/kvm/kvm_emulate.h | 2 +- arch/x86/kvm/svm/svm.c | 2 +- arch/x86/kvm/vmx/vmx.c | 4 ++-- arch/x86/kvm/x86.c | 12 ++++++------ 6 files changed, 12 insertions(+), 12 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 7997ed36de38..acb8353b97a4 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -2207,7 +2207,7 @@ bool kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0); bool kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3); bool kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); bool kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8); -int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val); +bool kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val); unsigned long kvm_get_dr(struct kvm_vcpu *vcpu, int dr); unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu); void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw); diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 4e3da5b497b8..f5cae7335c26 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -3280,7 +3280,7 @@ static int em_dr_write(struct x86_emulate_ctxt *ctxt) val = ctxt->src.val & ~0U; /* #UD condition is already handled. */ - if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0) + if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val)) return emulate_gp(ctxt, 0); /* Disable writeback. 
*/ diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h index efc64396d717..eafc91207b45 100644 --- a/arch/x86/kvm/kvm_emulate.h +++ b/arch/x86/kvm/kvm_emulate.h @@ -212,7 +212,7 @@ struct x86_emulate_ops { bool (*set_cr)(struct x86_emulate_ctxt *ctxt, int cr, ulong val); int (*cpl)(struct x86_emulate_ctxt *ctxt); ulong (*get_dr)(struct x86_emulate_ctxt *ctxt, int dr); - int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value); + bool (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value); int (*set_msr_with_filter)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data); int (*get_msr_with_filter)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata); int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 98bd087dda9c..7cbf4d686415 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -2556,7 +2556,7 @@ static int dr_interception(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); int reg, dr; - int err = 0; + bool err = false; /* * SEV-ES intercepts DR7 only to disable guest debugging and the guest issues a VMGEXIT diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index ff0c684d9c28..365c4ce283e5 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -5533,7 +5533,7 @@ static int handle_dr(struct kvm_vcpu *vcpu) { unsigned long exit_qualification; int dr, dr7, reg; - int err = 1; + bool err = true; exit_qualification = vmx_get_exit_qual(vcpu); dr = exit_qualification & DEBUG_REG_ACCESS_NUM; @@ -5580,7 +5580,7 @@ static int handle_dr(struct kvm_vcpu *vcpu) reg = DEBUG_REG_ACCESS_REG(exit_qualification); if (exit_qualification & TYPE_MOV_FROM_DR) { kvm_register_write(vcpu, reg, kvm_get_dr(vcpu, dr)); - err = 0; + err = false; } else { err = kvm_set_dr(vcpu, dr, kvm_register_read(vcpu, reg)); } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 462954633c1d..e733cb923312 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1550,7 +1550,7 @@ static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu) return fixed; } -int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) +bool kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) { size_t size = ARRAY_SIZE(vcpu->arch.db); @@ -1563,19 +1563,19 @@ int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) case 4: case 6: if (!kvm_dr6_valid(val)) - return 1; /* #GP */ + return true; /* #GP */ vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu); break; case 5: default: /* 7 */ if (!kvm_dr7_valid(val)) - return 1; /* #GP */ + return true; /* #GP */ vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; kvm_update_dr7(vcpu); break; } - return 0; + return false; } EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_dr); @@ -8530,8 +8530,8 @@ static unsigned long emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr) return kvm_get_dr(emul_to_vcpu(ctxt), dr); } -static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, - unsigned long value) +static bool emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, + unsigned long value) { return kvm_set_dr(emul_to_vcpu(ctxt), dr, value); -- 2.51.0 For MSR emulation return values only 2 special cases have defines, while the most used values 0 and 1 don't. Reason seems to be the maze of function calls of MSR emulation intertwined with the KVM guest exit handlers, which are using the values 0 and 1 for other purposes. 
This even led to the comment above the already existing defines, warning not to use the values 0 and 1 (and negative errno values) in the MSR emulation at all. Fact is that MSR emulation and exit handlers are rather well separated, with only very few exceptions, which are handled in a sane way. So add defines for the 0 and 1 return values of MSR emulation and, at the same time, add comments where exit handlers are calling into MSR emulation. The new defines will be used later. No change of functionality intended. Signed-off-by: Juergen Gross --- arch/x86/kvm/x86.c | 2 ++ arch/x86/kvm/x86.h | 10 ++++++++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index e733cb923312..e87963a47aa5 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2130,6 +2130,7 @@ static int __kvm_emulate_rdmsr(struct kvm_vcpu *vcpu, u32 msr, int reg, u64 data; int r; + /* Call MSR emulation. */ r = kvm_emulate_msr_read(vcpu, msr, &data); if (!r) { @@ -2171,6 +2172,7 @@ static int __kvm_emulate_wrmsr(struct kvm_vcpu *vcpu, u32 msr, u64 data) { int r; + /* Call MSR emulation. */ r = kvm_emulate_msr_write(vcpu, msr, data); if (!r) { trace_kvm_msr_write(msr, data); diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index f3dc77f006f9..e44b6373b106 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -639,15 +639,21 @@ enum kvm_msr_access { /* * Internal error codes that are used to indicate that MSR emulation encountered * an error that should result in #GP in the guest, unless userspace handles it. - * Note, '1', '0', and negative numbers are off limits, as they are used by KVM - * as part of KVM's lightly documented internal KVM_RUN return codes. + * Note, negative errno values are possible for return values, too. + * In case MSR emulation is called from an exit handler, any return value other + * than KVM_MSR_RET_OK will normally result in a GP in the guest. + * + * OK - Emulation succeeded. Must be 0, as in some cases return values + * of functions returning 0 or -errno will just be passed on. + * ERR - Some error occurred. * UNSUPPORTED - The MSR isn't supported, either because it is completely * unknown to KVM, or because the MSR should not exist according * to the vCPU model. * + * FILTERED - Access to the MSR is denied by a userspace MSR filter. */ +#define KVM_MSR_RET_OK 0 +#define KVM_MSR_RET_ERR 1 #define KVM_MSR_RET_UNSUPPORTED 2 #define KVM_MSR_RET_FILTERED 3 -- 2.51.0 Instead of "0" and "1" use the related KVM_MSR_RET_* defines in the emulation code of APIC related MSR registers. No change of functionality intended.
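To give a feel for the result (a condensed sketch of one of the hunks below, not new code), a typical lapic.c helper then reads:

	int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
	{
		if (!lapic_in_kernel(vcpu))
			return KVM_MSR_RET_ERR;		/* was: return 1; */

		return kvm_lapic_msr_write(vcpu->arch.apic, reg, data);
	}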
Signed-off-by: Juergen Gross --- arch/x86/kvm/lapic.c | 48 ++++++++++++++++++++++---------------------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 0ae7f913d782..bd4c0768b270 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -2339,7 +2339,7 @@ static int get_lvt_index(u32 reg) static int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val) { - int ret = 0; + int ret = KVM_MSR_RET_OK; trace_kvm_apic_write(reg, val); @@ -2348,7 +2348,7 @@ static int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val) if (!apic_x2apic_mode(apic)) { kvm_apic_set_xapic_id(apic, val >> 24); } else { - ret = 1; + ret = KVM_MSR_RET_ERR; } break; @@ -2365,14 +2365,14 @@ static int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val) if (!apic_x2apic_mode(apic)) kvm_apic_set_ldr(apic, val & APIC_LDR_MASK); else - ret = 1; + ret = KVM_MSR_RET_ERR; break; case APIC_DFR: if (!apic_x2apic_mode(apic)) kvm_apic_set_dfr(apic, val | 0x0FFFFFFF); else - ret = 1; + ret = KVM_MSR_RET_ERR; break; case APIC_SPIV: { @@ -2403,7 +2403,7 @@ static int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val) break; case APIC_ICR2: if (apic_x2apic_mode(apic)) - ret = 1; + ret = KVM_MSR_RET_ERR; else kvm_lapic_set_reg(apic, APIC_ICR2, val & 0xff000000); break; @@ -2418,7 +2418,7 @@ static int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val) case APIC_LVTCMCI: { u32 index = get_lvt_index(reg); if (!kvm_lapic_lvt_supported(apic, index)) { - ret = 1; + ret = KVM_MSR_RET_ERR; break; } if (!kvm_apic_sw_enabled(apic)) @@ -2460,7 +2460,7 @@ static int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val) } case APIC_ESR: if (apic_x2apic_mode(apic) && val != 0) - ret = 1; + ret = KVM_MSR_RET_ERR; break; case APIC_SELF_IPI: @@ -2469,12 +2469,12 @@ static int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val) * the vector, everything else is reserved. */ if (!apic_x2apic_mode(apic) || (val & ~APIC_VECTOR_MASK)) - ret = 1; + ret = KVM_MSR_RET_ERR; else kvm_apic_send_ipi(apic, APIC_DEST_SELF | val, 0); break; default: - ret = 1; + ret = KVM_MSR_RET_ERR; break; } @@ -2532,7 +2532,7 @@ EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_lapic_set_eoi); static int __kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data, bool fast) { if (data & X2APIC_ICR_RESERVED_BITS) - return 1; + return KVM_MSR_RET_ERR; /* * The BUSY bit is reserved on both Intel and AMD in x2APIC mode, but @@ -2565,7 +2565,7 @@ static int __kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data, bool fast) kvm_lapic_set_reg64(apic, APIC_ICR, data); } trace_kvm_apic_write(APIC_ICR, data); - return 0; + return KVM_MSR_RET_OK; } static int kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data) @@ -2728,23 +2728,23 @@ int kvm_apic_set_base(struct kvm_vcpu *vcpu, u64 value, bool host_initiated) enum lapic_mode new_mode = kvm_apic_mode(value); if (vcpu->arch.apic_base == value) - return 0; + return KVM_MSR_RET_OK; u64 reserved_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu) | 0x2ff | (guest_cpu_cap_has(vcpu, X86_FEATURE_X2APIC) ? 
0 : X2APIC_ENABLE); if ((value & reserved_bits) != 0 || new_mode == LAPIC_MODE_INVALID) - return 1; + return KVM_MSR_RET_ERR; if (!host_initiated) { if (old_mode == LAPIC_MODE_X2APIC && new_mode == LAPIC_MODE_XAPIC) - return 1; + return KVM_MSR_RET_ERR; if (old_mode == LAPIC_MODE_DISABLED && new_mode == LAPIC_MODE_X2APIC) - return 1; + return KVM_MSR_RET_ERR; } __kvm_apic_set_base(vcpu, value); kvm_recalculate_apic_map(vcpu->kvm); - return 0; + return KVM_MSR_RET_OK; } EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_apic_set_base); @@ -3362,15 +3362,15 @@ static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data) if (reg == APIC_ICR) { *data = kvm_x2apic_icr_read(apic); - return 0; + return KVM_MSR_RET_OK; } if (kvm_lapic_reg_read(apic, reg, 4, &low)) - return 1; + return KVM_MSR_RET_ERR; *data = low; - return 0; + return KVM_MSR_RET_OK; } static int kvm_lapic_msr_write(struct kvm_lapic *apic, u32 reg, u64 data) @@ -3385,7 +3385,7 @@ static int kvm_lapic_msr_write(struct kvm_lapic *apic, u32 reg, u64 data) /* Bits 63:32 are reserved in all other registers. */ if (data >> 32) - return 1; + return KVM_MSR_RET_ERR; return kvm_lapic_reg_write(apic, reg, (u32)data); } @@ -3396,7 +3396,7 @@ int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data) u32 reg = (msr - APIC_BASE_MSR) << 4; if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic)) - return 1; + return KVM_MSR_RET_ERR; return kvm_lapic_msr_write(apic, reg, data); } @@ -3407,7 +3407,7 @@ int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data) u32 reg = (msr - APIC_BASE_MSR) << 4; if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic)) - return 1; + return KVM_MSR_RET_ERR; return kvm_lapic_msr_read(apic, reg, data); } @@ -3415,7 +3415,7 @@ int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data) int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data) { if (!lapic_in_kernel(vcpu)) - return 1; + return KVM_MSR_RET_ERR; return kvm_lapic_msr_write(vcpu->arch.apic, reg, data); } @@ -3423,7 +3423,7 @@ int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data) int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data) { if (!lapic_in_kernel(vcpu)) - return 1; + return KVM_MSR_RET_ERR; return kvm_lapic_msr_read(vcpu->arch.apic, reg, data); } -- 2.51.0 Instead of "0" and "1" use the related KVM_MSR_RET_* defines in the emulation code of Hyper-V related MSR registers. No change of functionality intended. Signed-off-by: Juergen Gross --- arch/x86/kvm/hyperv.c | 110 +++++++++++++++++++++--------------------- 1 file changed, 55 insertions(+), 55 deletions(-) diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 38595ecb990d..7cc6d47becb5 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -167,7 +167,7 @@ static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint, * allow zero-initing the register from host as well. 
*/ if (vector < HV_SYNIC_FIRST_VALID_VECTOR && !host && !masked) - return 1; + return KVM_MSR_RET_ERR; /* * Guest may configure multiple SINTs to use the same vector, so * we maintain a bitmap of vectors handled by synic, and a @@ -184,7 +184,7 @@ static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint, /* Load SynIC vectors into EOI exit bitmap */ kvm_make_request(KVM_REQ_SCAN_IOAPIC, hv_synic_to_vcpu(synic)); - return 0; + return KVM_MSR_RET_OK; } static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx) @@ -263,11 +263,11 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic, int ret; if (!synic->active && (!host || data)) - return 1; + return KVM_MSR_RET_ERR; trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host); - ret = 0; + ret = KVM_MSR_RET_OK; switch (msr) { case HV_X64_MSR_SCONTROL: synic->control = data; @@ -276,7 +276,7 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic, break; case HV_X64_MSR_SVERSION: if (!host) { - ret = 1; + ret = KVM_MSR_RET_ERR; break; } synic->version = data; @@ -286,7 +286,7 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic, !synic->dont_zero_synic_pages) if (kvm_clear_guest(vcpu->kvm, data & PAGE_MASK, PAGE_SIZE)) { - ret = 1; + ret = KVM_MSR_RET_ERR; break; } synic->evt_page = data; @@ -298,7 +298,7 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic, !synic->dont_zero_synic_pages) if (kvm_clear_guest(vcpu->kvm, data & PAGE_MASK, PAGE_SIZE)) { - ret = 1; + ret = KVM_MSR_RET_ERR; break; } synic->msg_page = data; @@ -319,7 +319,7 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic, ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host); break; default: - ret = 1; + ret = KVM_MSR_RET_ERR; break; } return ret; @@ -365,7 +365,7 @@ static int syndbg_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu); if (!kvm_hv_is_syndbg_enabled(vcpu) && !host) - return 1; + return KVM_MSR_RET_ERR; trace_kvm_hv_syndbg_set_msr(vcpu->vcpu_id, to_hv_vcpu(vcpu)->vp_index, msr, data); @@ -396,7 +396,7 @@ static int syndbg_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) break; } - return 0; + return KVM_MSR_RET_OK; } static int syndbg_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) @@ -404,7 +404,7 @@ static int syndbg_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu); if (!kvm_hv_is_syndbg_enabled(vcpu) && !host) - return 1; + return KVM_MSR_RET_ERR; switch (msr) { case HV_X64_MSR_SYNDBG_CONTROL: @@ -431,7 +431,7 @@ static int syndbg_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id, kvm_hv_get_vpindex(vcpu), msr, *pdata); - return 0; + return KVM_MSR_RET_OK; } static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata, @@ -440,9 +440,9 @@ static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata, int ret; if (!synic->active && !host) - return 1; + return KVM_MSR_RET_ERR; - ret = 0; + ret = KVM_MSR_RET_OK; switch (msr) { case HV_X64_MSR_SCONTROL: *pdata = synic->control; @@ -463,7 +463,7 @@ static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata, *pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]); break; default: - ret = 1; + ret = KVM_MSR_RET_ERR; break; } return ret; @@ -695,12 +695,12 @@ static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config, struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu); if 
(!synic->active && (!host || config)) - return 1; + return KVM_MSR_RET_ERR; if (unlikely(!host && hv_vcpu->enforce_cpuid && new_config.direct_mode && !(hv_vcpu->cpuid_cache.features_edx & HV_STIMER_DIRECT_MODE_AVAILABLE))) - return 1; + return KVM_MSR_RET_ERR; trace_kvm_hv_stimer_set_config(hv_stimer_to_vcpu(stimer)->vcpu_id, stimer->index, config, host); @@ -714,7 +714,7 @@ static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config, if (stimer->config.enable) stimer_mark_pending(stimer, false); - return 0; + return KVM_MSR_RET_OK; } static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count, @@ -724,7 +724,7 @@ static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count, struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu); if (!synic->active && (!host || count)) - return 1; + return KVM_MSR_RET_ERR; trace_kvm_hv_stimer_set_count(hv_stimer_to_vcpu(stimer)->vcpu_id, stimer->index, count, host); @@ -741,19 +741,19 @@ static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count, if (stimer->config.enable) stimer_mark_pending(stimer, false); - return 0; + return KVM_MSR_RET_OK; } static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig) { *pconfig = stimer->config.as_uint64; - return 0; + return KVM_MSR_RET_OK; } static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount) { *pcount = stimer->count; - return 0; + return KVM_MSR_RET_OK; } static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint, @@ -1042,7 +1042,7 @@ static int kvm_hv_msr_get_crash_data(struct kvm *kvm, u32 index, u64 *pdata) return -EINVAL; *pdata = hv->hv_crash_param[array_index_nospec(index, size)]; - return 0; + return KVM_MSR_RET_OK; } static int kvm_hv_msr_get_crash_ctl(struct kvm *kvm, u64 *pdata) @@ -1050,7 +1050,7 @@ static int kvm_hv_msr_get_crash_ctl(struct kvm *kvm, u64 *pdata) struct kvm_hv *hv = to_kvm_hv(kvm); *pdata = hv->hv_crash_ctl; - return 0; + return KVM_MSR_RET_OK; } static int kvm_hv_msr_set_crash_ctl(struct kvm *kvm, u64 data) @@ -1059,7 +1059,7 @@ static int kvm_hv_msr_set_crash_ctl(struct kvm *kvm, u64 data) hv->hv_crash_ctl = data & HV_CRASH_CTL_CRASH_NOTIFY; - return 0; + return KVM_MSR_RET_OK; } static int kvm_hv_msr_set_crash_data(struct kvm *kvm, u32 index, u64 data) @@ -1071,7 +1071,7 @@ static int kvm_hv_msr_set_crash_data(struct kvm *kvm, u32 index, u64 data) return -EINVAL; hv->hv_crash_param[array_index_nospec(index, size)] = data; - return 0; + return KVM_MSR_RET_OK; } /* @@ -1380,7 +1380,7 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data, struct kvm_hv *hv = to_kvm_hv(kvm); if (unlikely(!host && !hv_check_msr_access(to_hv_vcpu(vcpu), msr))) - return 1; + return KVM_MSR_RET_ERR; switch (msr) { case HV_X64_MSR_GUEST_OS_ID: @@ -1426,7 +1426,7 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data, addr = data & HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK; if (kvm_vcpu_write_guest(vcpu, addr, instructions, i)) - return 1; + return KVM_MSR_RET_ERR; hv->hv_hypercall = data; break; } @@ -1476,23 +1476,23 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data, break; case HV_X64_MSR_TSC_EMULATION_STATUS: if (data && !host) - return 1; + return KVM_MSR_RET_ERR; hv->hv_tsc_emulation_status = data; break; case HV_X64_MSR_TIME_REF_COUNT: /* read-only, but still ignore it if host-initiated */ if (!host) - return 1; + return KVM_MSR_RET_ERR; break; case HV_X64_MSR_TSC_INVARIANT_CONTROL: /* Only bit 0 is supported */ if (data & 
~HV_EXPOSE_INVARIANT_TSC) - return 1; + return KVM_MSR_RET_ERR; /* The feature can't be disabled from the guest */ if (!host && hv->hv_invtsc_control && !data) - return 1; + return KVM_MSR_RET_ERR; hv->hv_invtsc_control = data; break; @@ -1501,9 +1501,9 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data, return syndbg_set_msr(vcpu, msr, data, host); default: kvm_pr_unimpl_wrmsr(vcpu, msr, data); - return 1; + return KVM_MSR_RET_ERR; } - return 0; + return KVM_MSR_RET_OK; } /* Calculate cpu time spent by current task in 100ns units */ @@ -1521,7 +1521,7 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr))) - return 1; + return KVM_MSR_RET_ERR; switch (msr) { case HV_X64_MSR_VP_INDEX: { @@ -1529,10 +1529,10 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) u32 new_vp_index = (u32)data; if (!host || new_vp_index >= KVM_MAX_VCPUS) - return 1; + return KVM_MSR_RET_ERR; if (new_vp_index == hv_vcpu->vp_index) - return 0; + return KVM_MSR_RET_OK; /* * The VP index is initialized to vcpu_index by @@ -1555,13 +1555,13 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) if (!(data & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) { hv_vcpu->hv_vapic = data; if (kvm_lapic_set_pv_eoi(vcpu, 0, 0)) - return 1; + return KVM_MSR_RET_ERR; break; } gfn = data >> HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT; addr = kvm_vcpu_gfn_to_hva(vcpu, gfn); if (kvm_is_error_hva(addr)) - return 1; + return KVM_MSR_RET_ERR; /* * Clear apic_assist portion of struct hv_vp_assist_page @@ -1569,13 +1569,13 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) * to be preserved e.g. on migration. 
*/ if (__put_user(0, (u32 __user *)addr)) - return 1; + return KVM_MSR_RET_ERR; hv_vcpu->hv_vapic = data; kvm_vcpu_mark_page_dirty(vcpu, gfn); if (kvm_lapic_set_pv_eoi(vcpu, gfn_to_gpa(gfn) | KVM_MSR_ENABLED, sizeof(struct hv_vp_assist_page))) - return 1; + return KVM_MSR_RET_ERR; break; } case HV_X64_MSR_EOI: @@ -1586,7 +1586,7 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data); case HV_X64_MSR_VP_RUNTIME: if (!host) - return 1; + return KVM_MSR_RET_ERR; hv_vcpu->runtime_offset = data - current_task_runtime_100ns(); break; case HV_X64_MSR_SCONTROL: @@ -1618,14 +1618,14 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) case HV_X64_MSR_APIC_FREQUENCY: /* read-only, but still ignore it if host-initiated */ if (!host) - return 1; + return KVM_MSR_RET_ERR; break; default: kvm_pr_unimpl_wrmsr(vcpu, msr, data); - return 1; + return KVM_MSR_RET_ERR; } - return 0; + return KVM_MSR_RET_OK; } static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, @@ -1636,7 +1636,7 @@ static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, struct kvm_hv *hv = to_kvm_hv(kvm); if (unlikely(!host && !hv_check_msr_access(to_hv_vcpu(vcpu), msr))) - return 1; + return KVM_MSR_RET_ERR; switch (msr) { case HV_X64_MSR_GUEST_OS_ID: @@ -1677,11 +1677,11 @@ static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, return syndbg_get_msr(vcpu, msr, pdata, host); default: kvm_pr_unimpl_rdmsr(vcpu, msr); - return 1; + return KVM_MSR_RET_ERR; } *pdata = data; - return 0; + return KVM_MSR_RET_OK; } static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, @@ -1691,7 +1691,7 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr))) - return 1; + return KVM_MSR_RET_ERR; switch (msr) { case HV_X64_MSR_VP_INDEX: @@ -1743,10 +1743,10 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, break; default: kvm_pr_unimpl_rdmsr(vcpu, msr); - return 1; + return KVM_MSR_RET_ERR; } *pdata = data; - return 0; + return KVM_MSR_RET_OK; } int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) @@ -1754,10 +1754,10 @@ int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); if (!host && !vcpu->arch.hyperv_enabled) - return 1; + return KVM_MSR_RET_ERR; if (kvm_hv_vcpu_init(vcpu)) - return 1; + return KVM_MSR_RET_ERR; if (kvm_hv_msr_partition_wide(msr)) { int r; @@ -1775,10 +1775,10 @@ int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); if (!host && !vcpu->arch.hyperv_enabled) - return 1; + return KVM_MSR_RET_ERR; if (kvm_hv_vcpu_init(vcpu)) - return 1; + return KVM_MSR_RET_ERR; if (kvm_hv_msr_partition_wide(msr)) { int r; -- 2.51.0 Instead of "0" and "1" use the related KVM_MSR_RET_* defines in the emulation code of VMX related MSR registers. No change of functionality intended. 
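As a condensed illustration of the result (a sketch only, simplified from the vmx_get_msr() hunks below rather than copied verbatim):

	/* Simplified sketch, not the real vmx_get_msr(): one emulated MSR plus the common fallback. */
	static int example_vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
	{
		struct vcpu_vmx *vmx = to_vmx(vcpu);

		switch (msr_info->index) {
		case MSR_IA32_UMWAIT_CONTROL:
			if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx))
				return KVM_MSR_RET_ERR;		/* was: return 1; */
			msr_info->data = vmx->msr_ia32_umwait_control;
			break;
		default:
			return kvm_get_msr_common(vcpu, msr_info);
		}

		return KVM_MSR_RET_OK;				/* was: return 0; */
	}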
Signed-off-by: Juergen Gross --- arch/x86/kvm/vmx/nested.c | 18 +++--- arch/x86/kvm/vmx/pmu_intel.c | 20 +++---- arch/x86/kvm/vmx/tdx.c | 16 +++--- arch/x86/kvm/vmx/vmx.c | 104 +++++++++++++++++------------------ 4 files changed, 79 insertions(+), 79 deletions(-) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index bcea087b642f..76e8dc811bae 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -1325,7 +1325,7 @@ static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data) return -EINVAL; vmx->nested.msrs.basic = data; - return 0; + return KVM_MSR_RET_OK; } static void vmx_get_control_msr(struct nested_vmx_msrs *msrs, u32 msr_index, @@ -1378,7 +1378,7 @@ vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) vmx_get_control_msr(&vmx->nested.msrs, msr_index, &lowp, &highp); *lowp = data; *highp = data >> 32; - return 0; + return KVM_MSR_RET_OK; } static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data) @@ -1426,7 +1426,7 @@ static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data) vmx->nested.msrs.misc_low = data; vmx->nested.msrs.misc_high = data >> 32; - return 0; + return KVM_MSR_RET_OK; } static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data) @@ -1440,7 +1440,7 @@ static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data) vmx->nested.msrs.ept_caps = data; vmx->nested.msrs.vpid_caps = data >> 32; - return 0; + return KVM_MSR_RET_OK; } static u64 *vmx_get_fixed0_msr(struct nested_vmx_msrs *msrs, u32 msr_index) @@ -1467,7 +1467,7 @@ static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) return -EINVAL; *vmx_get_fixed0_msr(&vmx->nested.msrs, msr_index) = data; - return 0; + return KVM_MSR_RET_OK; } /* @@ -1525,12 +1525,12 @@ int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) return vmx_restore_vmx_ept_vpid_cap(vmx, data); case MSR_IA32_VMX_VMCS_ENUM: vmx->nested.msrs.vmcs_enum = data; - return 0; + return KVM_MSR_RET_OK; case MSR_IA32_VMX_VMFUNC: if (data & ~vmcs_config.nested.vmfunc_controls) return -EINVAL; vmx->nested.msrs.vmfunc_controls = data; - return 0; + return KVM_MSR_RET_OK; default: /* * The rest of the VMX capability MSRs do not support restore. 
@@ -1611,10 +1611,10 @@ int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata) *pdata = msrs->vmfunc_controls; break; default: - return 1; + return KVM_MSR_RET_ERR; } - return 0; + return KVM_MSR_RET_OK; } /* diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index de1d9785c01f..8bab64a748b8 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -374,10 +374,10 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) } else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, true)) { break; } - return 1; + return KVM_MSR_RET_ERR; } - return 0; + return KVM_MSR_RET_OK; } static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) @@ -391,14 +391,14 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) switch (msr) { case MSR_CORE_PERF_FIXED_CTR_CTRL: if (data & pmu->fixed_ctr_ctrl_rsvd) - return 1; + return KVM_MSR_RET_ERR; if (pmu->fixed_ctr_ctrl != data) reprogram_fixed_counters(pmu, data); break; case MSR_IA32_PEBS_ENABLE: if (data & pmu->pebs_enable_rsvd) - return 1; + return KVM_MSR_RET_ERR; if (pmu->pebs_enable != data) { diff = pmu->pebs_enable ^ data; @@ -408,13 +408,13 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) break; case MSR_IA32_DS_AREA: if (is_noncanonical_msr_address(data, vcpu)) - return 1; + return KVM_MSR_RET_ERR; pmu->ds_area = data; break; case MSR_PEBS_DATA_CFG: if (data & pmu->pebs_data_cfg_rsvd) - return 1; + return KVM_MSR_RET_ERR; pmu->pebs_data_cfg = data; break; @@ -423,7 +423,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) { if ((msr & MSR_PMC_FULL_WIDTH_BIT) && (data & ~pmu->counter_bitmask[KVM_PMC_GP])) - return 1; + return KVM_MSR_RET_ERR; if (!msr_info->host_initiated && !(msr & MSR_PMC_FULL_WIDTH_BIT)) @@ -439,7 +439,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) (pmu->raw_event_mask & HSW_IN_TX_CHECKPOINTED)) reserved_bits ^= HSW_IN_TX_CHECKPOINTED; if (data & reserved_bits) - return 1; + return KVM_MSR_RET_ERR; if (data != pmc->eventsel) { pmc->eventsel = data; @@ -450,10 +450,10 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) break; } /* Not a known PMU MSR. 
*/ - return 1; + return KVM_MSR_RET_ERR; } - return 0; + return KVM_MSR_RET_OK; } /* diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c index 6b99c8dbd8cc..9c798de48272 100644 --- a/arch/x86/kvm/vmx/tdx.c +++ b/arch/x86/kvm/vmx/tdx.c @@ -2236,15 +2236,15 @@ int tdx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) msr->data = FEAT_CTL_LOCKED; if (vcpu->arch.mcg_cap & MCG_LMCE_P) msr->data |= FEAT_CTL_LMCE_ENABLED; - return 0; + return KVM_MSR_RET_OK; case MSR_IA32_MCG_EXT_CTL: if (!msr->host_initiated && !(vcpu->arch.mcg_cap & MCG_LMCE_P)) - return 1; + return KVM_MSR_RET_ERR; msr->data = vcpu->arch.mcg_ext_ctl; - return 0; + return KVM_MSR_RET_OK; default: if (!tdx_has_emulated_msr(msr->index)) - return 1; + return KVM_MSR_RET_ERR; return kvm_get_msr_common(vcpu, msr); } @@ -2256,15 +2256,15 @@ int tdx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) case MSR_IA32_MCG_EXT_CTL: if ((!msr->host_initiated && !(vcpu->arch.mcg_cap & MCG_LMCE_P)) || (msr->data & ~MCG_EXT_CTL_LMCE_EN)) - return 1; + return KVM_MSR_RET_ERR; vcpu->arch.mcg_ext_ctl = msr->data; - return 0; + return KVM_MSR_RET_OK; default: if (tdx_is_read_only_msr(msr->index)) - return 1; + return KVM_MSR_RET_ERR; if (!tdx_has_emulated_msr(msr->index)) - return 1; + return KVM_MSR_RET_ERR; return kvm_set_msr_common(vcpu, msr); } diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 365c4ce283e5..a3282a5830ca 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -662,7 +662,7 @@ static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx, struct vmx_uret_msr *msr, u64 data) { unsigned int slot = msr - vmx->guest_uret_msrs; - int ret = 0; + int ret = KVM_MSR_RET_OK; if (msr->load_into_hardware) { preempt_disable(); @@ -1958,7 +1958,7 @@ int vmx_get_feature_msr(u32 msr, u64 *data) switch (msr) { case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR: if (!nested) - return 1; + return KVM_MSR_RET_ERR; return vmx_get_vmx_msr(&vmcs_config.nested, msr, data); default: return KVM_MSR_RET_UNSUPPORTED; @@ -1993,18 +1993,18 @@ int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_IA32_TSX_CTRL: if (!msr_info->host_initiated && !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR)) - return 1; + return KVM_MSR_RET_ERR; goto find_uret_msr; case MSR_IA32_UMWAIT_CONTROL: if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx)) - return 1; + return KVM_MSR_RET_ERR; msr_info->data = vmx->msr_ia32_umwait_control; break; case MSR_IA32_SPEC_CTRL: if (!msr_info->host_initiated && !guest_has_spec_ctrl_msr(vcpu)) - return 1; + return KVM_MSR_RET_ERR; msr_info->data = to_vmx(vcpu)->spec_ctrl; break; @@ -2021,14 +2021,14 @@ int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if (!kvm_mpx_supported() || (!msr_info->host_initiated && !guest_cpu_cap_has(vcpu, X86_FEATURE_MPX))) - return 1; + return KVM_MSR_RET_ERR; msr_info->data = vmcs_read64(GUEST_BNDCFGS); break; case MSR_IA32_MCG_EXT_CTL: if (!msr_info->host_initiated && !(vmx->msr_ia32_feature_control & FEAT_CTL_LMCE_ENABLED)) - return 1; + return KVM_MSR_RET_ERR; msr_info->data = vcpu->arch.mcg_ext_ctl; break; case MSR_IA32_FEAT_CTL: @@ -2037,16 +2037,16 @@ int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_IA32_SGXLEPUBKEYHASH0 ... 
MSR_IA32_SGXLEPUBKEYHASH3: if (!msr_info->host_initiated && !guest_cpu_cap_has(vcpu, X86_FEATURE_SGX_LC)) - return 1; + return KVM_MSR_RET_ERR; msr_info->data = to_vmx(vcpu)->msr_ia32_sgxlepubkeyhash [msr_info->index - MSR_IA32_SGXLEPUBKEYHASH0]; break; case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR: if (!guest_cpu_cap_has(vcpu, X86_FEATURE_VMX)) - return 1; + return KVM_MSR_RET_ERR; if (vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index, &msr_info->data)) - return 1; + return KVM_MSR_RET_ERR; #ifdef CONFIG_KVM_HYPERV /* * Enlightened VMCS v1 doesn't have certain VMCS fields but @@ -2062,19 +2062,19 @@ int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) break; case MSR_IA32_RTIT_CTL: if (!vmx_pt_mode_is_host_guest()) - return 1; + return KVM_MSR_RET_ERR; msr_info->data = vmx->pt_desc.guest.ctl; break; case MSR_IA32_RTIT_STATUS: if (!vmx_pt_mode_is_host_guest()) - return 1; + return KVM_MSR_RET_ERR; msr_info->data = vmx->pt_desc.guest.status; break; case MSR_IA32_RTIT_CR3_MATCH: if (!vmx_pt_mode_is_host_guest() || !intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_cr3_filtering)) - return 1; + return KVM_MSR_RET_ERR; msr_info->data = vmx->pt_desc.guest.cr3_match; break; case MSR_IA32_RTIT_OUTPUT_BASE: @@ -2083,7 +2083,7 @@ int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) PT_CAP_topa_output) && !intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_single_range_output))) - return 1; + return KVM_MSR_RET_ERR; msr_info->data = vmx->pt_desc.guest.output_base; break; case MSR_IA32_RTIT_OUTPUT_MASK: @@ -2092,14 +2092,14 @@ int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) PT_CAP_topa_output) && !intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_single_range_output))) - return 1; + return KVM_MSR_RET_ERR; msr_info->data = vmx->pt_desc.guest.output_mask; break; case MSR_IA32_RTIT_ADDR0_A ... 
MSR_IA32_RTIT_ADDR3_B: index = msr_info->index - MSR_IA32_RTIT_ADDR0_A; if (!vmx_pt_mode_is_host_guest() || (index >= 2 * vmx->pt_desc.num_address_ranges)) - return 1; + return KVM_MSR_RET_ERR; if (index % 2) msr_info->data = vmx->pt_desc.guest.addr_b[index / 2]; else @@ -2127,7 +2127,7 @@ int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return kvm_get_msr_common(vcpu, msr_info); } - return 0; + return KVM_MSR_RET_OK; } static u64 nested_vmx_truncate_sysenter_addr(struct kvm_vcpu *vcpu, @@ -2180,7 +2180,7 @@ int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct vmx_uret_msr *msr; - int ret = 0; + int ret = KVM_MSR_RET_OK; u32 msr_index = msr_info->index; u64 data = msr_info->data; u32 index; @@ -2241,7 +2241,7 @@ int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) break; case MSR_IA32_DEBUGCTLMSR: if (!vmx_is_valid_debugctl(vcpu, data, msr_info->host_initiated)) - return 1; + return KVM_MSR_RET_ERR; data &= vmx_get_supported_debugctl(vcpu, msr_info->host_initiated); @@ -2254,15 +2254,15 @@ int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if (intel_pmu_lbr_is_enabled(vcpu) && !to_vmx(vcpu)->lbr_desc.event && (data & DEBUGCTLMSR_LBR)) intel_pmu_create_guest_lbr_event(vcpu); - return 0; + return KVM_MSR_RET_OK; case MSR_IA32_BNDCFGS: if (!kvm_mpx_supported() || (!msr_info->host_initiated && !guest_cpu_cap_has(vcpu, X86_FEATURE_MPX))) - return 1; + return KVM_MSR_RET_ERR; if (is_noncanonical_msr_address(data & PAGE_MASK, vcpu) || (data & MSR_IA32_BNDCFGS_RSVD)) - return 1; + return KVM_MSR_RET_ERR; if (is_guest_mode(vcpu) && ((vmx->nested.msrs.entry_ctls_high & VM_ENTRY_LOAD_BNDCFGS) || @@ -2273,21 +2273,21 @@ int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) break; case MSR_IA32_UMWAIT_CONTROL: if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx)) - return 1; + return KVM_MSR_RET_ERR; /* The reserved bit 1 and non-32 bit [63:32] should be zero */ if (data & (BIT_ULL(1) | GENMASK_ULL(63, 32))) - return 1; + return KVM_MSR_RET_ERR; vmx->msr_ia32_umwait_control = data; break; case MSR_IA32_SPEC_CTRL: if (!msr_info->host_initiated && !guest_has_spec_ctrl_msr(vcpu)) - return 1; + return KVM_MSR_RET_ERR; if (kvm_spec_ctrl_test_value(data)) - return 1; + return KVM_MSR_RET_ERR; vmx->spec_ctrl = data; if (!data) @@ -2312,9 +2312,9 @@ int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_IA32_TSX_CTRL: if (!msr_info->host_initiated && !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR)) - return 1; + return KVM_MSR_RET_ERR; if (data & ~(TSX_CTRL_RTM_DISABLE | TSX_CTRL_CPUID_CLEAR)) - return 1; + return KVM_MSR_RET_ERR; goto find_uret_msr; case MSR_IA32_CR_PAT: ret = kvm_set_msr_common(vcpu, msr_info); @@ -2333,12 +2333,12 @@ int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) !(to_vmx(vcpu)->msr_ia32_feature_control & FEAT_CTL_LMCE_ENABLED)) || (data & ~MCG_EXT_CTL_LMCE_EN)) - return 1; + return KVM_MSR_RET_ERR; vcpu->arch.mcg_ext_ctl = data; break; case MSR_IA32_FEAT_CTL: if (!is_vmx_feature_control_msr_valid(vmx, msr_info)) - return 1; + return KVM_MSR_RET_ERR; vmx->msr_ia32_feature_control = data; if (msr_info->host_initiated && data == 0) @@ -2363,70 +2363,70 @@ int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) (!guest_cpu_cap_has(vcpu, X86_FEATURE_SGX_LC) || ((vmx->msr_ia32_feature_control & FEAT_CTL_LOCKED) && !(vmx->msr_ia32_feature_control & FEAT_CTL_SGX_LC_ENABLED)))) - return 1; + return KVM_MSR_RET_ERR; 
vmx->msr_ia32_sgxlepubkeyhash [msr_index - MSR_IA32_SGXLEPUBKEYHASH0] = data; break; case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR: if (!msr_info->host_initiated) - return 1; /* they are read-only */ + return KVM_MSR_RET_ERR; /* they are read-only */ if (!guest_cpu_cap_has(vcpu, X86_FEATURE_VMX)) - return 1; + return KVM_MSR_RET_ERR; return vmx_set_vmx_msr(vcpu, msr_index, data); case MSR_IA32_RTIT_CTL: if (!vmx_pt_mode_is_host_guest() || vmx_rtit_ctl_check(vcpu, data) || vmx->nested.vmxon) - return 1; + return KVM_MSR_RET_ERR; vmcs_write64(GUEST_IA32_RTIT_CTL, data); vmx->pt_desc.guest.ctl = data; pt_update_intercept_for_msr(vcpu); break; case MSR_IA32_RTIT_STATUS: if (!pt_can_write_msr(vmx)) - return 1; + return KVM_MSR_RET_ERR; if (data & MSR_IA32_RTIT_STATUS_MASK) - return 1; + return KVM_MSR_RET_ERR; vmx->pt_desc.guest.status = data; break; case MSR_IA32_RTIT_CR3_MATCH: if (!pt_can_write_msr(vmx)) - return 1; + return KVM_MSR_RET_ERR; if (!intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_cr3_filtering)) - return 1; + return KVM_MSR_RET_ERR; vmx->pt_desc.guest.cr3_match = data; break; case MSR_IA32_RTIT_OUTPUT_BASE: if (!pt_can_write_msr(vmx)) - return 1; + return KVM_MSR_RET_ERR; if (!intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_topa_output) && !intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_single_range_output)) - return 1; + return KVM_MSR_RET_ERR; if (!pt_output_base_valid(vcpu, data)) - return 1; + return KVM_MSR_RET_ERR; vmx->pt_desc.guest.output_base = data; break; case MSR_IA32_RTIT_OUTPUT_MASK: if (!pt_can_write_msr(vmx)) - return 1; + return KVM_MSR_RET_ERR; if (!intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_topa_output) && !intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_single_range_output)) - return 1; + return KVM_MSR_RET_ERR; vmx->pt_desc.guest.output_mask = data; break; case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B: if (!pt_can_write_msr(vmx)) - return 1; + return KVM_MSR_RET_ERR; index = msr_info->index - MSR_IA32_RTIT_ADDR0_A; if (index >= 2 * vmx->pt_desc.num_address_ranges) - return 1; + return KVM_MSR_RET_ERR; if (is_noncanonical_msr_address(data, vcpu)) - return 1; + return KVM_MSR_RET_ERR; if (index % 2) vmx->pt_desc.guest.addr_b[index / 2] = data; else @@ -2445,20 +2445,20 @@ int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if (data & PERF_CAP_LBR_FMT) { if ((data & PERF_CAP_LBR_FMT) != (kvm_caps.supported_perf_cap & PERF_CAP_LBR_FMT)) - return 1; + return KVM_MSR_RET_ERR; if (!cpuid_model_is_consistent(vcpu)) - return 1; + return KVM_MSR_RET_ERR; } if (data & PERF_CAP_PEBS_FORMAT) { if ((data & PERF_CAP_PEBS_MASK) != (kvm_caps.supported_perf_cap & PERF_CAP_PEBS_MASK)) - return 1; + return KVM_MSR_RET_ERR; if (!guest_cpu_cap_has(vcpu, X86_FEATURE_DS)) - return 1; + return KVM_MSR_RET_ERR; if (!guest_cpu_cap_has(vcpu, X86_FEATURE_DTES64)) - return 1; + return KVM_MSR_RET_ERR; if (!cpuid_model_is_consistent(vcpu)) - return 1; + return KVM_MSR_RET_ERR; } ret = kvm_set_msr_common(vcpu, msr_info); break; -- 2.51.0 Instead of "0" and "1" use the related KVM_MSR_RET_* defines in the emulation code of SVM related MSR registers. No change of functionality intended. 
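As a condensed illustration of the result (a sketch only, simplified from the amd_pmu_get_msr() hunk below; the surrounding declarations are approximated, not copied from the patch):

	/* Simplified sketch of the AMD PMU counter read path with symbolic return values. */
	static int example_amd_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
	{
		struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
		struct kvm_pmc *pmc;

		/* MSR_PERFCTRn */
		pmc = get_gp_pmc_amd(pmu, msr_info->index, PMU_TYPE_COUNTER);
		if (pmc) {
			msr_info->data = pmc_read_counter(pmc);
			return KVM_MSR_RET_OK;		/* was: return 0; */
		}

		return KVM_MSR_RET_ERR;			/* was: return 1; */
	}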
Signed-off-by: Juergen Gross --- arch/x86/kvm/svm/pmu.c | 12 ++++++------ arch/x86/kvm/svm/svm.c | 44 +++++++++++++++++++++--------------------- 2 files changed, 28 insertions(+), 28 deletions(-) diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c index bc062285fbf5..c4b2fe77cc27 100644 --- a/arch/x86/kvm/svm/pmu.c +++ b/arch/x86/kvm/svm/pmu.c @@ -135,16 +135,16 @@ static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER); if (pmc) { msr_info->data = pmc_read_counter(pmc); - return 0; + return KVM_MSR_RET_OK; } /* MSR_EVNTSELn */ pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL); if (pmc) { msr_info->data = pmc->eventsel; - return 0; + return KVM_MSR_RET_OK; } - return 1; + return KVM_MSR_RET_ERR; } static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) @@ -158,7 +158,7 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER); if (pmc) { pmc_write_counter(pmc, data); - return 0; + return KVM_MSR_RET_OK; } /* MSR_EVNTSELn */ pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL); @@ -168,10 +168,10 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) pmc->eventsel = data; kvm_pmu_request_counter_reprogram(pmc); } - return 0; + return KVM_MSR_RET_OK; } - return 1; + return KVM_MSR_RET_ERR; } static void amd_pmu_refresh(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 7cbf4d686415..73ff38617311 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -2638,7 +2638,7 @@ static int svm_get_feature_msr(u32 msr, u64 *data) return KVM_MSR_RET_UNSUPPORTED; } - return 0; + return KVM_MSR_RET_OK; } static bool sev_es_prevent_msr_access(struct kvm_vcpu *vcpu, @@ -2655,14 +2655,14 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if (sev_es_prevent_msr_access(vcpu, msr_info)) { msr_info->data = 0; - return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0; + return vcpu->kvm->arch.has_protected_state ? 
-EINVAL : KVM_MSR_RET_OK; } switch (msr_info->index) { case MSR_AMD64_TSC_RATIO: if (!msr_info->host_initiated && !guest_cpu_cap_has(vcpu, X86_FEATURE_TSCRATEMSR)) - return 1; + return KVM_MSR_RET_ERR; msr_info->data = svm->tsc_ratio_msr; break; case MSR_STAR: @@ -2737,7 +2737,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_IA32_SPEC_CTRL: if (!msr_info->host_initiated && !guest_has_spec_ctrl_msr(vcpu)) - return 1; + return KVM_MSR_RET_ERR; if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL)) msr_info->data = svm->vmcb->save.spec_ctrl; @@ -2747,7 +2747,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_AMD64_VIRT_SPEC_CTRL: if (!msr_info->host_initiated && !guest_cpu_cap_has(vcpu, X86_FEATURE_VIRT_SSBD)) - return 1; + return KVM_MSR_RET_ERR; msr_info->data = svm->virt_spec_ctrl; break; @@ -2774,7 +2774,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) default: return kvm_get_msr_common(vcpu, msr_info); } - return 0; + return KVM_MSR_RET_OK; } static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, bool err) @@ -2793,7 +2793,7 @@ static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data) int svm_dis, chg_mask; if (data & ~SVM_VM_CR_VALID_MASK) - return 1; + return KVM_MSR_RET_ERR; chg_mask = SVM_VM_CR_VALID_MASK; @@ -2807,21 +2807,21 @@ static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data) /* check for svm_disable while efer.svme is set */ if (svm_dis && (vcpu->arch.efer & EFER_SVME)) - return 1; + return KVM_MSR_RET_ERR; - return 0; + return KVM_MSR_RET_OK; } static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) { struct vcpu_svm *svm = to_svm(vcpu); - int ret = 0; + int ret = KVM_MSR_RET_OK; u32 ecx = msr->index; u64 data = msr->data; if (sev_es_prevent_msr_access(vcpu, msr)) - return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0; + return vcpu->kvm->arch.has_protected_state ? -EINVAL : KVM_MSR_RET_OK; switch (ecx) { case MSR_AMD64_TSC_RATIO: @@ -2829,7 +2829,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) if (!guest_cpu_cap_has(vcpu, X86_FEATURE_TSCRATEMSR)) { if (!msr->host_initiated) - return 1; + return KVM_MSR_RET_ERR; /* * In case TSC scaling is not enabled, always * leave this MSR at the default value. @@ -2839,12 +2839,12 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) * Ignore this value as well. 
*/ if (data != 0 && data != svm->tsc_ratio_msr) - return 1; + return KVM_MSR_RET_ERR; break; } if (data & SVM_TSC_RATIO_RSVD) - return 1; + return KVM_MSR_RET_ERR; svm->tsc_ratio_msr = data; @@ -2866,10 +2866,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) case MSR_IA32_SPEC_CTRL: if (!msr->host_initiated && !guest_has_spec_ctrl_msr(vcpu)) - return 1; + return KVM_MSR_RET_ERR; if (kvm_spec_ctrl_test_value(data)) - return 1; + return KVM_MSR_RET_ERR; if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL)) svm->vmcb->save.spec_ctrl = data; @@ -2894,10 +2894,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) case MSR_AMD64_VIRT_SPEC_CTRL: if (!msr->host_initiated && !guest_cpu_cap_has(vcpu, X86_FEATURE_VIRT_SSBD)) - return 1; + return KVM_MSR_RET_ERR; if (data & ~SPEC_CTRL_SSBD) - return 1; + return KVM_MSR_RET_ERR; svm->virt_spec_ctrl = data; break; @@ -2992,7 +2992,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) } if (data & DEBUGCTL_RESERVED_BITS) - return 1; + return KVM_MSR_RET_ERR; if (svm->vmcb->save.dbgctl == data) break; @@ -3009,7 +3009,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) * originating from those kernels. */ if (!msr->host_initiated && !page_address_valid(vcpu, data)) - return 1; + return KVM_MSR_RET_ERR; svm->nested.hsave_msr = data & PAGE_MASK; break; @@ -3022,10 +3022,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) u64 supported_de_cfg; if (svm_get_feature_msr(ecx, &supported_de_cfg)) - return 1; + return KVM_MSR_RET_ERR; if (data & ~supported_de_cfg) - return 1; + return KVM_MSR_RET_ERR; svm->msr_decfg = data; break; -- 2.51.0 Instead of "0" and "1" use the related KVM_MSR_RET_* defines in the common emulation code of MSR registers. No change of functionality intended. 
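The common-code conversion below also touches the caller side of the convention: success is tested explicitly against KVM_MSR_RET_OK instead of relying on "!ret". A stand-alone sketch of that pattern follows; the define values and the backend_get_msr()/example_read() helpers are assumptions purely for illustration, not kernel interfaces.

#include <stdint.h>
#include <stdio.h>

#define KVM_MSR_RET_OK          0	/* assumed */
#define KVM_MSR_RET_ERR         1	/* assumed */
#define KVM_MSR_RET_UNSUPPORTED 2	/* assumed */

/* Hypothetical low-level accessor returning one of the codes above. */
static int backend_get_msr(uint32_t index, uint64_t *data)
{
	if (index != 0x3a)
		return KVM_MSR_RET_UNSUPPORTED;
	*data = 0x5;
	return KVM_MSR_RET_OK;
}

/*
 * Mirrors the shape of __kvm_get_msr(): forward the return code
 * unchanged and fill *data only on success.
 */
static int example_read(uint32_t index, uint64_t *data)
{
	uint64_t val;
	int ret = backend_get_msr(index, &val);

	if (ret == KVM_MSR_RET_OK)	/* was: if (!ret) */
		*data = val;
	return ret;
}

int main(void)
{
	uint64_t data = 0;
	int ret = example_read(0x3a, &data);

	if (ret == KVM_MSR_RET_OK)
		printf("read 0x%llx\n", (unsigned long long)data);
	else
		printf("error code %d\n", ret);
	return 0;
}

Comparing against KVM_MSR_RET_OK rather than zero keeps the intent visible once additional codes such as KVM_MSR_RET_UNSUPPORTED can flow through the same path.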
Signed-off-by: Juergen Gross --- arch/x86/kvm/mtrr.c | 12 +-- arch/x86/kvm/pmu.c | 12 +-- arch/x86/kvm/x86.c | 176 ++++++++++++++++++++++---------------------- arch/x86/kvm/xen.c | 14 ++-- 4 files changed, 107 insertions(+), 107 deletions(-) diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c index 6f74e2b27c1e..b699cfddb0f3 100644 --- a/arch/x86/kvm/mtrr.c +++ b/arch/x86/kvm/mtrr.c @@ -99,13 +99,13 @@ int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data) mtrr = find_mtrr(vcpu, msr); if (!mtrr) - return 1; + return KVM_MSR_RET_ERR; if (!kvm_mtrr_valid(vcpu, msr, data)) - return 1; + return KVM_MSR_RET_ERR; *mtrr = data; - return 0; + return KVM_MSR_RET_OK; } int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) @@ -121,13 +121,13 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) * VCNT = KVM_NR_VAR_MTRR */ *pdata = 0x500 | KVM_NR_VAR_MTRR; - return 0; + return KVM_MSR_RET_OK; } mtrr = find_mtrr(vcpu, msr); if (!mtrr) - return 1; + return KVM_MSR_RET_ERR; *pdata = *mtrr; - return 0; + return KVM_MSR_RET_OK; } diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 487ad19a236e..17c0b0bd3ecc 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -754,7 +754,7 @@ int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return kvm_pmu_call(get_msr)(vcpu, msr_info); } - return 0; + return KVM_MSR_RET_OK; } int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) @@ -771,7 +771,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) switch (msr) { case MSR_CORE_PERF_GLOBAL_STATUS: if (!msr_info->host_initiated) - return 1; /* RO MSR */ + return KVM_MSR_RET_ERR; /* RO MSR */ fallthrough; case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS: /* Per PPR, Read-only MSR. Writes are ignored. */ @@ -779,7 +779,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) break; if (data & pmu->global_status_rsvd) - return 1; + return KVM_MSR_RET_ERR; pmu->global_status = data; break; @@ -788,7 +788,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) fallthrough; case MSR_CORE_PERF_GLOBAL_CTRL: if (!kvm_valid_perf_global_ctrl(pmu, data)) - return 1; + return KVM_MSR_RET_ERR; if (pmu->global_ctrl != data) { diff = pmu->global_ctrl ^ data; @@ -802,7 +802,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) * GLOBAL_STATUS, and so the set of reserved bits is the same. */ if (data & pmu->global_status_rsvd) - return 1; + return KVM_MSR_RET_ERR; fallthrough; case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR: if (!msr_info->host_initiated) @@ -817,7 +817,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return kvm_pmu_call(set_msr)(vcpu, msr_info); } - return 0; + return KVM_MSR_RET_OK; } static void kvm_pmu_reset(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index e87963a47aa5..3105e773b02b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -541,7 +541,7 @@ static __always_inline int kvm_do_msr_access(struct kvm_vcpu *vcpu, u32 msr, * and all reads (in which case @data is zeroed on failure; see above). 
*/ if (host_initiated && !*data && kvm_is_advertised_msr(msr)) - return 0; + return KVM_MSR_RET_OK; if (!ignore_msrs) { kvm_debug_ratelimited("unhandled %s: 0x%x data 0x%llx\n", @@ -552,7 +552,7 @@ static __always_inline int kvm_do_msr_access(struct kvm_vcpu *vcpu, u32 msr, if (report_ignored_msrs) kvm_pr_unimpl("ignored %s: 0x%x data 0x%llx\n", op, msr, *data); - return 0; + return KVM_MSR_RET_OK; } static struct kmem_cache *kvm_alloc_emulator_cache(void) @@ -670,14 +670,14 @@ int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask) value = (value & mask) | (msrs->values[slot].host & ~mask); if (value == msrs->values[slot].curr) - return 0; + return KVM_MSR_RET_OK; err = wrmsrq_safe(kvm_uret_msrs_list[slot], value); if (err) - return 1; + return KVM_MSR_RET_ERR; msrs->values[slot].curr = value; kvm_user_return_register_notifier(msrs); - return 0; + return KVM_MSR_RET_OK; } EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_user_return_msr); @@ -1712,7 +1712,7 @@ static int kvm_get_feature_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, default: return kvm_x86_call(get_feature_msr)(index, data); } - return 0; + return KVM_MSR_RET_OK; } static int do_get_feature_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) @@ -1855,7 +1855,7 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data, case MSR_CSTAR: case MSR_LSTAR: if (is_noncanonical_msr_address(data, vcpu)) - return 1; + return KVM_MSR_RET_ERR; break; case MSR_IA32_SYSENTER_EIP: case MSR_IA32_SYSENTER_ESP: @@ -1875,12 +1875,12 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data, break; case MSR_TSC_AUX: if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX)) - return 1; + return KVM_MSR_RET_ERR; if (!host_initiated && !guest_cpu_cap_has(vcpu, X86_FEATURE_RDTSCP) && !guest_cpu_cap_has(vcpu, X86_FEATURE_RDPID)) - return 1; + return KVM_MSR_RET_ERR; /* * Per Intel's SDM, bits 63:32 are reserved, but AMD's APM has @@ -1892,7 +1892,7 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data, * provide consistent behavior for the guest. 
*/ if (guest_cpuid_is_intel_compatible(vcpu) && (data >> 32) != 0) - return 1; + return KVM_MSR_RET_ERR; data = (u32)data; break; @@ -1902,11 +1902,11 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data, !guest_cpu_cap_has(vcpu, X86_FEATURE_IBT)) return KVM_MSR_RET_UNSUPPORTED; if (!kvm_is_valid_u_s_cet(vcpu, data)) - return 1; + return KVM_MSR_RET_ERR; break; case MSR_KVM_INTERNAL_GUEST_SSP: if (!host_initiated) - return 1; + return KVM_MSR_RET_ERR; fallthrough; /* * Note that the MSR emulation here is flawed when a vCPU @@ -1929,10 +1929,10 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data, if (index == MSR_IA32_INT_SSP_TAB && !guest_cpu_cap_has(vcpu, X86_FEATURE_LM)) return KVM_MSR_RET_UNSUPPORTED; if (is_noncanonical_msr_address(data, vcpu)) - return 1; + return KVM_MSR_RET_ERR; /* All SSP MSRs except MSR_IA32_INT_SSP_TAB must be 4-byte aligned */ if (index != MSR_IA32_INT_SSP_TAB && !IS_ALIGNED(data, 4)) - return 1; + return KVM_MSR_RET_ERR; break; } @@ -1971,12 +1971,12 @@ static int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, switch (index) { case MSR_TSC_AUX: if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX)) - return 1; + return KVM_MSR_RET_ERR; if (!host_initiated && !guest_cpu_cap_has(vcpu, X86_FEATURE_RDTSCP) && !guest_cpu_cap_has(vcpu, X86_FEATURE_RDPID)) - return 1; + return KVM_MSR_RET_ERR; break; case MSR_IA32_U_CET: case MSR_IA32_S_CET: @@ -1986,7 +1986,7 @@ static int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, break; case MSR_KVM_INTERNAL_GUEST_SSP: if (!host_initiated) - return 1; + return KVM_MSR_RET_ERR; fallthrough; case MSR_IA32_PL0_SSP ... MSR_IA32_INT_SSP_TAB: if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK)) @@ -1998,7 +1998,7 @@ static int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, msr.host_initiated = host_initiated; ret = kvm_x86_call(get_msr)(vcpu, &msr); - if (!ret) + if (ret == KVM_MSR_RET_OK) *data = msr.data; return ret; } @@ -2133,7 +2133,7 @@ static int __kvm_emulate_rdmsr(struct kvm_vcpu *vcpu, u32 msr, int reg, /* Call MSR emulation. */ r = kvm_emulate_msr_read(vcpu, msr, &data); - if (!r) { + if (r == KVM_MSR_RET_OK) { trace_kvm_msr_read(msr, data); if (reg < 0) { @@ -2174,7 +2174,7 @@ static int __kvm_emulate_wrmsr(struct kvm_vcpu *vcpu, u32 msr, u64 data) /* Call MSR emulation. */ r = kvm_emulate_msr_write(vcpu, msr, data); - if (!r) { + if (r == KVM_MSR_RET_OK) { trace_kvm_msr_write(msr, data); } else { /* MSR write failed? See if we should ask user space */ @@ -3975,7 +3975,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return KVM_MSR_RET_UNSUPPORTED; if (data & ~kvm_caps.supported_perf_cap) - return 1; + return KVM_MSR_RET_ERR; /* * Note, this is not just a performance optimization! 
KVM @@ -3993,7 +3993,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if (!msr_info->host_initiated) { if ((!guest_has_pred_cmd_msr(vcpu))) - return 1; + return KVM_MSR_RET_ERR; if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SPEC_CTRL) && !guest_cpu_cap_has(vcpu, X86_FEATURE_AMD_IBPB)) @@ -4010,7 +4010,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) reserved_bits |= PRED_CMD_SBPB; if (data & reserved_bits) - return 1; + return KVM_MSR_RET_ERR; if (!data) break; @@ -4021,10 +4021,10 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_IA32_FLUSH_CMD: if (!msr_info->host_initiated && !guest_cpu_cap_has(vcpu, X86_FEATURE_FLUSH_L1D)) - return 1; + return KVM_MSR_RET_ERR; if (!boot_cpu_has(X86_FEATURE_FLUSH_L1D) || (data & ~L1D_FLUSH)) - return 1; + return KVM_MSR_RET_ERR; if (!data) break; @@ -4044,19 +4044,19 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) */ if (data & ~(BIT_ULL(18) | BIT_ULL(24))) { kvm_pr_unimpl_wrmsr(vcpu, msr, data); - return 1; + return KVM_MSR_RET_ERR; } vcpu->arch.msr_hwcr = data; break; case MSR_FAM10H_MMIO_CONF_BASE: if (data != 0) { kvm_pr_unimpl_wrmsr(vcpu, msr, data); - return 1; + return KVM_MSR_RET_ERR; } break; case MSR_IA32_CR_PAT: if (!kvm_pat_valid(data)) - return 1; + return KVM_MSR_RET_ERR; vcpu->arch.pat = data; break; @@ -4089,7 +4089,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if (!msr_info->host_initiated) { /* RO bits */ if ((old_val ^ data) & MSR_IA32_MISC_ENABLE_PMU_RO_MASK) - return 1; + return KVM_MSR_RET_ERR; /* R bits, i.e. writes are ignored, but don't fault. */ data = data & ~MSR_IA32_MISC_ENABLE_EMON; @@ -4099,7 +4099,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT) && ((old_val ^ data) & MSR_IA32_MISC_ENABLE_MWAIT)) { if (!guest_cpu_cap_has(vcpu, X86_FEATURE_XMM3)) - return 1; + return KVM_MSR_RET_ERR; vcpu->arch.ia32_misc_enable_msr = data; vcpu->arch.cpuid_dynamic_bits_dirty = true; } else { @@ -4109,7 +4109,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) } case MSR_IA32_SMBASE: if (!IS_ENABLED(CONFIG_KVM_SMM) || !msr_info->host_initiated) - return 1; + return KVM_MSR_RET_ERR; vcpu->arch.smbase = data; break; case MSR_IA32_POWER_CTL: @@ -4129,7 +4129,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return KVM_MSR_RET_UNSUPPORTED; if (data & ~vcpu->arch.guest_supported_xss) - return 1; + return KVM_MSR_RET_ERR; if (vcpu->arch.ia32_xss == data) break; vcpu->arch.ia32_xss = data; @@ -4137,52 +4137,52 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) break; case MSR_SMI_COUNT: if (!msr_info->host_initiated) - return 1; + return KVM_MSR_RET_ERR; vcpu->arch.smi_count = data; break; case MSR_KVM_WALL_CLOCK_NEW: if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) - return 1; + return KVM_MSR_RET_ERR; vcpu->kvm->arch.wall_clock = data; kvm_write_wall_clock(vcpu->kvm, data, 0); break; case MSR_KVM_WALL_CLOCK: if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) - return 1; + return KVM_MSR_RET_ERR; vcpu->kvm->arch.wall_clock = data; kvm_write_wall_clock(vcpu->kvm, data, 0); break; case MSR_KVM_SYSTEM_TIME_NEW: if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) - return 1; + return KVM_MSR_RET_ERR; kvm_write_system_time(vcpu, data, false, msr_info->host_initiated); break; case MSR_KVM_SYSTEM_TIME: if (!guest_pv_has(vcpu, 
KVM_FEATURE_CLOCKSOURCE)) - return 1; + return KVM_MSR_RET_ERR; kvm_write_system_time(vcpu, data, true, msr_info->host_initiated); break; case MSR_KVM_ASYNC_PF_EN: if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF)) - return 1; + return KVM_MSR_RET_ERR; if (kvm_pv_enable_async_pf(vcpu, data)) - return 1; + return KVM_MSR_RET_ERR; break; case MSR_KVM_ASYNC_PF_INT: if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) - return 1; + return KVM_MSR_RET_ERR; if (kvm_pv_enable_async_pf_int(vcpu, data)) - return 1; + return KVM_MSR_RET_ERR; break; case MSR_KVM_ASYNC_PF_ACK: if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) - return 1; + return KVM_MSR_RET_ERR; if (data & 0x1) { vcpu->arch.apf.pageready_pending = false; kvm_check_async_pf_completion(vcpu); @@ -4190,13 +4190,13 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) break; case MSR_KVM_STEAL_TIME: if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME)) - return 1; + return KVM_MSR_RET_ERR; if (unlikely(!sched_info_on())) - return 1; + return KVM_MSR_RET_ERR; if (data & KVM_STEAL_RESERVED_MASK) - return 1; + return KVM_MSR_RET_ERR; vcpu->arch.st.msr_val = data; @@ -4208,19 +4208,19 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) break; case MSR_KVM_PV_EOI_EN: if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI)) - return 1; + return KVM_MSR_RET_ERR; if (kvm_lapic_set_pv_eoi(vcpu, data, sizeof(u8))) - return 1; + return KVM_MSR_RET_ERR; break; case MSR_KVM_POLL_CONTROL: if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL)) - return 1; + return KVM_MSR_RET_ERR; /* only enable bit supported */ if (data & (-1ULL << 1)) - return 1; + return KVM_MSR_RET_ERR; vcpu->arch.msr_kvm_poll_control = data; break; @@ -4273,44 +4273,44 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) break; case MSR_AMD64_OSVW_ID_LENGTH: if (!guest_cpu_cap_has(vcpu, X86_FEATURE_OSVW)) - return 1; + return KVM_MSR_RET_ERR; vcpu->arch.osvw.length = data; break; case MSR_AMD64_OSVW_STATUS: if (!guest_cpu_cap_has(vcpu, X86_FEATURE_OSVW)) - return 1; + return KVM_MSR_RET_ERR; vcpu->arch.osvw.status = data; break; case MSR_PLATFORM_INFO: if (!msr_info->host_initiated) - return 1; + return KVM_MSR_RET_ERR; vcpu->arch.msr_platform_info = data; break; case MSR_MISC_FEATURES_ENABLES: if (data & ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT || (data & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT && !supports_cpuid_fault(vcpu))) - return 1; + return KVM_MSR_RET_ERR; vcpu->arch.msr_misc_features_enables = data; break; #ifdef CONFIG_X86_64 case MSR_IA32_XFD: if (!msr_info->host_initiated && !guest_cpu_cap_has(vcpu, X86_FEATURE_XFD)) - return 1; + return KVM_MSR_RET_ERR; if (data & ~kvm_guest_supported_xfd(vcpu)) - return 1; + return KVM_MSR_RET_ERR; fpu_update_guest_xfd(&vcpu->arch.guest_fpu, data); break; case MSR_IA32_XFD_ERR: if (!msr_info->host_initiated && !guest_cpu_cap_has(vcpu, X86_FEATURE_XFD)) - return 1; + return KVM_MSR_RET_ERR; if (data & ~kvm_guest_supported_xfd(vcpu)) - return 1; + return KVM_MSR_RET_ERR; vcpu->arch.guest_fpu.xfd_err = data; break; @@ -4325,7 +4325,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return KVM_MSR_RET_UNSUPPORTED; } - return 0; + return KVM_MSR_RET_OK; } EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_msr_common); @@ -4346,7 +4346,7 @@ static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) break; case MSR_IA32_MCG_CTL: if (!(mcg_cap & MCG_CTL_P) && !host) - return 1; + return KVM_MSR_RET_ERR; data = vcpu->arch.mcg_ctl; break; case MSR_IA32_MCG_STATUS: @@ 
-4355,10 +4355,10 @@ static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1: last_msr = MSR_IA32_MCx_CTL2(bank_num) - 1; if (msr > last_msr) - return 1; + return KVM_MSR_RET_ERR; if (!(mcg_cap & MCG_CMCI_P) && !host) - return 1; + return KVM_MSR_RET_ERR; offset = array_index_nospec(msr - MSR_IA32_MC0_CTL2, last_msr + 1 - MSR_IA32_MC0_CTL2); data = vcpu->arch.mci_ctl2_banks[offset]; @@ -4366,17 +4366,17 @@ static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: last_msr = MSR_IA32_MCx_CTL(bank_num) - 1; if (msr > last_msr) - return 1; + return KVM_MSR_RET_ERR; offset = array_index_nospec(msr - MSR_IA32_MC0_CTL, last_msr + 1 - MSR_IA32_MC0_CTL); data = vcpu->arch.mce_banks[offset]; break; default: - return 1; + return KVM_MSR_RET_ERR; } *pdata = data; - return 0; + return KVM_MSR_RET_OK; } int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) @@ -4500,7 +4500,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) break; case MSR_IA32_SMBASE: if (!IS_ENABLED(CONFIG_KVM_SMM) || !msr_info->host_initiated) - return 1; + return KVM_MSR_RET_ERR; msr_info->data = vcpu->arch.smbase; break; case MSR_SMI_COUNT: @@ -4517,61 +4517,61 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) break; case MSR_KVM_WALL_CLOCK: if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) - return 1; + return KVM_MSR_RET_ERR; msr_info->data = vcpu->kvm->arch.wall_clock; break; case MSR_KVM_WALL_CLOCK_NEW: if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) - return 1; + return KVM_MSR_RET_ERR; msr_info->data = vcpu->kvm->arch.wall_clock; break; case MSR_KVM_SYSTEM_TIME: if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) - return 1; + return KVM_MSR_RET_ERR; msr_info->data = vcpu->arch.time; break; case MSR_KVM_SYSTEM_TIME_NEW: if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) - return 1; + return KVM_MSR_RET_ERR; msr_info->data = vcpu->arch.time; break; case MSR_KVM_ASYNC_PF_EN: if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF)) - return 1; + return KVM_MSR_RET_ERR; msr_info->data = vcpu->arch.apf.msr_en_val; break; case MSR_KVM_ASYNC_PF_INT: if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) - return 1; + return KVM_MSR_RET_ERR; msr_info->data = vcpu->arch.apf.msr_int_val; break; case MSR_KVM_ASYNC_PF_ACK: if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) - return 1; + return KVM_MSR_RET_ERR; msr_info->data = 0; break; case MSR_KVM_STEAL_TIME: if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME)) - return 1; + return KVM_MSR_RET_ERR; msr_info->data = vcpu->arch.st.msr_val; break; case MSR_KVM_PV_EOI_EN: if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI)) - return 1; + return KVM_MSR_RET_ERR; msr_info->data = vcpu->arch.pv_eoi.msr_val; break; case MSR_KVM_POLL_CONTROL: if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL)) - return 1; + return KVM_MSR_RET_ERR; msr_info->data = vcpu->arch.msr_kvm_poll_control; break; @@ -4587,7 +4587,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_IA32_XSS: if (!msr_info->host_initiated && !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)) - return 1; + return KVM_MSR_RET_ERR; msr_info->data = vcpu->arch.ia32_xss; break; case MSR_K7_CLK_CTL: @@ -4632,18 +4632,18 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) break; case MSR_AMD64_OSVW_ID_LENGTH: if (!guest_cpu_cap_has(vcpu, X86_FEATURE_OSVW)) - return 1; + return 
KVM_MSR_RET_ERR; msr_info->data = vcpu->arch.osvw.length; break; case MSR_AMD64_OSVW_STATUS: if (!guest_cpu_cap_has(vcpu, X86_FEATURE_OSVW)) - return 1; + return KVM_MSR_RET_ERR; msr_info->data = vcpu->arch.osvw.status; break; case MSR_PLATFORM_INFO: if (!msr_info->host_initiated && !vcpu->kvm->arch.guest_can_read_msr_platform_info) - return 1; + return KVM_MSR_RET_ERR; msr_info->data = vcpu->arch.msr_platform_info; break; case MSR_MISC_FEATURES_ENABLES: @@ -4656,14 +4656,14 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_IA32_XFD: if (!msr_info->host_initiated && !guest_cpu_cap_has(vcpu, X86_FEATURE_XFD)) - return 1; + return KVM_MSR_RET_ERR; msr_info->data = vcpu->arch.guest_fpu.fpstate->xfd; break; case MSR_IA32_XFD_ERR: if (!msr_info->host_initiated && !guest_cpu_cap_has(vcpu, X86_FEATURE_XFD)) - return 1; + return KVM_MSR_RET_ERR; msr_info->data = vcpu->arch.guest_fpu.xfd_err; break; @@ -4678,7 +4678,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return KVM_MSR_RET_UNSUPPORTED; } - return 0; + return KVM_MSR_RET_OK; } EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_get_msr_common); @@ -6103,7 +6103,7 @@ static int kvm_translate_kvm_reg(struct kvm_vcpu *vcpu, default: return -EINVAL; } - return 0; + return KVM_MSR_RET_OK; } static int kvm_get_one_msr(struct kvm_vcpu *vcpu, u32 msr, u64 __user *user_val) @@ -6116,7 +6116,7 @@ static int kvm_get_one_msr(struct kvm_vcpu *vcpu, u32 msr, u64 __user *user_val) if (put_user(val, user_val)) return -EFAULT; - return 0; + return KVM_MSR_RET_OK; } static int kvm_set_one_msr(struct kvm_vcpu *vcpu, u32 msr, u64 __user *user_val) @@ -6129,7 +6129,7 @@ static int kvm_set_one_msr(struct kvm_vcpu *vcpu, u32 msr, u64 __user *user_val) if (do_set_msr(vcpu, msr, &val)) return -EINVAL; - return 0; + return KVM_MSR_RET_OK; } static int kvm_get_set_one_reg(struct kvm_vcpu *vcpu, unsigned int ioctl, @@ -6153,7 +6153,7 @@ static int kvm_get_set_one_reg(struct kvm_vcpu *vcpu, unsigned int ioctl, if (reg->type == KVM_X86_REG_TYPE_KVM) { r = kvm_translate_kvm_reg(vcpu, reg); - if (r) + if (r != KVM_MSR_RET_OK) return r; } @@ -7611,7 +7611,7 @@ static void kvm_probe_feature_msr(u32 msr_index) { u64 data; - if (kvm_get_feature_msr(NULL, msr_index, &data, true)) + if (kvm_get_feature_msr(NULL, msr_index, &data, true) != KVM_MSR_RET_OK) return; msr_based_features[num_msr_based_features++] = msr_index; @@ -8709,7 +8709,7 @@ static int emulator_get_msr_with_filter(struct x86_emulate_ctxt *ctxt, if (r < 0) return X86EMUL_UNHANDLEABLE; - if (r) { + if (r != KVM_MSR_RET_OK) { if (kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_RDMSR, 0, complete_emulated_rdmsr, r)) return X86EMUL_IO_NEEDED; diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index d6b2a665b499..df456bbf792c 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -1279,7 +1279,7 @@ int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data) u32 page_num = data & ~PAGE_MASK; u64 page_addr = data & PAGE_MASK; bool lm = is_long_mode(vcpu); - int r = 0; + int r = KVM_MSR_RET_OK; mutex_lock(&kvm->arch.xen.xen_lock); if (kvm->arch.xen.long_mode != lm) { @@ -1291,11 +1291,11 @@ int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data) */ if (kvm->arch.xen.shinfo_cache.active && kvm_xen_shared_info_init(kvm)) - r = 1; + r = KVM_MSR_RET_ERR; } mutex_unlock(&kvm->arch.xen.xen_lock); - if (r) + if (r != KVM_MSR_RET_OK) return r; /* @@ -1328,7 +1328,7 @@ int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data) if 
(kvm_vcpu_write_guest(vcpu, page_addr + (i * sizeof(instructions)), instructions, sizeof(instructions))) - return 1; + return KVM_MSR_RET_ERR; } } else { /* @@ -1343,7 +1343,7 @@ int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data) int ret; if (page_num >= blob_size) - return 1; + return KVM_MSR_RET_ERR; blob_addr += page_num * PAGE_SIZE; @@ -1354,9 +1354,9 @@ int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data) ret = kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE); kfree(page); if (ret) - return 1; + return KVM_MSR_RET_ERR; } - return 0; + return KVM_MSR_RET_OK; } int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc) -- 2.51.0