Simplify the error paths in snp_launch_update() by using a mutex guard,
allowing early return instead of using gotos.

Signed-off-by: Carlos López
---
 arch/x86/kvm/svm/sev.c | 32 +++++++++++++-------------------
 1 file changed, 13 insertions(+), 19 deletions(-)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index f59c65abe3cf..1b325ae61d15 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -8,6 +8,7 @@
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/cleanup.h>
 #include <linux/kvm_types.h>
 #include <linux/kvm_host.h>
 #include <linux/kernel.h>
@@ -2367,7 +2368,6 @@ static int snp_launch_update(struct kvm *kvm, struct kvm_sev_cmd *argp)
 	struct kvm_memory_slot *memslot;
 	long npages, count;
 	void __user *src;
-	int ret = 0;
 
 	if (!sev_snp_guest(kvm) || !sev->snp_context)
 		return -EINVAL;
@@ -2407,13 +2407,11 @@ static int snp_launch_update(struct kvm *kvm, struct kvm_sev_cmd *argp)
 	 * initial expected state and better guard against unexpected
 	 * situations.
 	 */
-	mutex_lock(&kvm->slots_lock);
+	guard(mutex)(&kvm->slots_lock);
 
 	memslot = gfn_to_memslot(kvm, params.gfn_start);
-	if (!kvm_slot_has_gmem(memslot)) {
-		ret = -EINVAL;
-		goto out;
-	}
+	if (!kvm_slot_has_gmem(memslot))
+		return -EINVAL;
 
 	sev_populate_args.sev_fd = argp->sev_fd;
 	sev_populate_args.type = params.type;
@@ -2425,22 +2423,18 @@ static int snp_launch_update(struct kvm *kvm, struct kvm_sev_cmd *argp)
 		argp->error = sev_populate_args.fw_error;
 		pr_debug("%s: kvm_gmem_populate failed, ret %ld (fw_error %d)\n",
 			 __func__, count, argp->error);
-		ret = -EIO;
-	} else {
-		params.gfn_start += count;
-		params.len -= count * PAGE_SIZE;
-		if (params.type != KVM_SEV_SNP_PAGE_TYPE_ZERO)
-			params.uaddr += count * PAGE_SIZE;
-
-		ret = 0;
-		if (copy_to_user(u64_to_user_ptr(argp->data), &params, sizeof(params)))
-			ret = -EFAULT;
+		return -EIO;
 	}
 
-out:
-	mutex_unlock(&kvm->slots_lock);
+	params.gfn_start += count;
+	params.len -= count * PAGE_SIZE;
+	if (params.type != KVM_SEV_SNP_PAGE_TYPE_ZERO)
+		params.uaddr += count * PAGE_SIZE;
 
-	return ret;
+	if (copy_to_user(u64_to_user_ptr(argp->data), &params, sizeof(params)))
+		return -EFAULT;
+
+	return 0;
 }
 
 static int snp_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
-- 
2.51.0
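For readers who have not used the cleanup helpers yet: guard(mutex)(&lock) acquires the mutex immediately and releases it automatically when the enclosing scope is left, which is what makes the early returns above safe. The snippet below is a minimal, purely illustrative userspace sketch of the same idea, built on the compiler's cleanup attribute and pthreads rather than the kernel's guard()/DEFINE_GUARD() machinery; every function and variable name in it is made up for the example.

/*
 * Illustrative userspace analogue of guard(mutex): the lock is released
 * by a cleanup handler whenever the function is left, including via an
 * early return.  Not kernel code; a pthread mutex stands in for struct mutex.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Runs automatically when the guarded variable goes out of scope. */
static void unlock_cleanup(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
	puts("mutex released");
}

static int do_work(int fail)
{
	pthread_mutex_lock(&lock);
	/* Roughly what guard(mutex)(&lock) boils down to. */
	pthread_mutex_t *guard __attribute__((cleanup(unlock_cleanup))) = &lock;

	if (fail)
		return -1;	/* early return: the cleanup still unlocks */

	puts("work done under the lock");
	return 0;
}

int main(void)
{
	do_work(1);
	do_work(0);
	return 0;
}

Built with something like "gcc -pthread", the failing call shows the mutex being released on the error return just as it is on the normal path.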
Simplify the error paths in sev_mem_enc_ioctl() by using a mutex guard,
allowing early return instead of using gotos.

Signed-off-by: Carlos López
---
 arch/x86/kvm/svm/sev.c | 25 ++++++++-----------------
 1 file changed, 8 insertions(+), 17 deletions(-)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 1b325ae61d15..0ee1b77aeec5 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -2575,30 +2575,24 @@ int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp)
 	if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
 		return -EFAULT;
 
-	mutex_lock(&kvm->lock);
+	guard(mutex)(&kvm->lock);
 
 	/* Only the enc_context_owner handles some memory enc operations. */
 	if (is_mirroring_enc_context(kvm) &&
-	    !is_cmd_allowed_from_mirror(sev_cmd.id)) {
-		r = -EINVAL;
-		goto out;
-	}
+	    !is_cmd_allowed_from_mirror(sev_cmd.id))
+		return -EINVAL;
 
 	/*
 	 * Once KVM_SEV_INIT2 initializes a KVM instance as an SNP guest, only
 	 * allow the use of SNP-specific commands.
 	 */
-	if (sev_snp_guest(kvm) && sev_cmd.id < KVM_SEV_SNP_LAUNCH_START) {
-		r = -EPERM;
-		goto out;
-	}
+	if (sev_snp_guest(kvm) && sev_cmd.id < KVM_SEV_SNP_LAUNCH_START)
+		return -EPERM;
 
 	switch (sev_cmd.id) {
 	case KVM_SEV_ES_INIT:
-		if (!sev_es_enabled) {
-			r = -ENOTTY;
-			goto out;
-		}
+		if (!sev_es_enabled)
+			return -ENOTTY;
 		fallthrough;
 	case KVM_SEV_INIT:
 		r = sev_guest_init(kvm, &sev_cmd);
@@ -2667,15 +2661,12 @@ int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp)
 		r = snp_launch_finish(kvm, &sev_cmd);
 		break;
 	default:
-		r = -EINVAL;
-		goto out;
+		return -EINVAL;
 	}
 
 	if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
 		r = -EFAULT;
 
-out:
-	mutex_unlock(&kvm->lock);
 	return r;
 }
 
-- 
2.51.0

Simplify the error paths in sev_mem_enc_register_region() by using a
mutex guard, allowing early return instead of using a goto.

Signed-off-by: Carlos López
---
 arch/x86/kvm/svm/sev.c | 11 +++--------
 1 file changed, 3 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 0ee1b77aeec5..253f2ae24bfc 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -2691,13 +2691,13 @@ int sev_mem_enc_register_region(struct kvm *kvm,
 	if (!region)
 		return -ENOMEM;
 
-	mutex_lock(&kvm->lock);
+	guard(mutex)(&kvm->lock);
 	region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages,
 				       FOLL_WRITE | FOLL_LONGTERM);
 	if (IS_ERR(region->pages)) {
 		ret = PTR_ERR(region->pages);
-		mutex_unlock(&kvm->lock);
-		goto e_free;
+		kfree(region);
+		return ret;
 	}
 
 	/*
@@ -2714,13 +2714,8 @@ int sev_mem_enc_register_region(struct kvm *kvm,
 
 	region->size = range->size;
 	list_add_tail(&region->list, &sev->regions_list);
-	mutex_unlock(&kvm->lock);
 
 	return ret;
-
-e_free:
-	kfree(region);
-	return ret;
 }
 
 static struct enc_region *
-- 
2.51.0

Simplify the error paths in sev_mem_enc_unregister_region() by using a
mutex guard, allowing early return instead of using gotos.

Signed-off-by: Carlos López
---
 arch/x86/kvm/svm/sev.c | 20 +++++---------------
 1 file changed, 5 insertions(+), 15 deletions(-)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 253f2ae24bfc..47ff5267ab01 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -2746,35 +2746,25 @@ int sev_mem_enc_unregister_region(struct kvm *kvm,
 				  struct kvm_enc_region *range)
 {
 	struct enc_region *region;
-	int ret;
 
 	/* If kvm is mirroring encryption context it isn't responsible for it */
 	if (is_mirroring_enc_context(kvm))
 		return -EINVAL;
 
-	mutex_lock(&kvm->lock);
+	guard(mutex)(&kvm->lock);
 
-	if (!sev_guest(kvm)) {
-		ret = -ENOTTY;
-		goto failed;
-	}
+	if (!sev_guest(kvm))
+		return -ENOTTY;
 
 	region = find_enc_region(kvm, range);
-	if (!region) {
-		ret = -EINVAL;
-		goto failed;
-	}
+	if (!region)
+		return -EINVAL;
 
 	sev_writeback_caches(kvm);
 
 	__unregister_enc_region_locked(kvm, region);
 
-	mutex_unlock(&kvm->lock);
 	return 0;
-
-failed:
-	mutex_unlock(&kvm->lock);
-	return ret;
 }
 
 int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd)
-- 
2.51.0
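One point worth keeping in mind with sev_mem_enc_register_region() above: the guard only takes care of kvm->lock, so resources acquired inside the function, such as the freshly allocated region, still have to be released by hand on an early error return, hence the explicit kfree(region) in the failure branch. A hypothetical userspace sketch of that shape, with invented names and plain malloc()/free() standing in for the kernel allocators:

/*
 * Illustrative only: the scope-based guard covers the lock, nothing else.
 * Any other resource picked up along the way must still be released
 * explicitly on the early-return error path.
 */
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void unlock_cleanup(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

struct region {
	void *pages;
	size_t size;
};

static int register_region(size_t size)
{
	struct region *region = calloc(1, sizeof(*region));

	if (!region)
		return -1;

	pthread_mutex_lock(&lock);
	pthread_mutex_t *guard __attribute__((cleanup(unlock_cleanup))) = &lock;

	region->pages = malloc(size);	/* stand-in for sev_pin_memory() */
	if (!region->pages) {
		free(region);		/* the guard drops only the lock */
		return -1;
	}

	region->size = size;
	/* the real code would now link the region into a list and keep it */
	return 0;
}

int main(void)
{
	return register_region(4096) ? 1 : 0;
}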
Simplify the error paths in snp_handle_guest_req() by using a mutex
guard, allowing early return instead of using gotos.

Signed-off-by: Carlos López
---
 arch/x86/kvm/svm/sev.c | 23 ++++++++---------------
 1 file changed, 8 insertions(+), 15 deletions(-)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 47ff5267ab01..5f46b7f073b0 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -4090,12 +4090,10 @@ static int snp_handle_guest_req(struct vcpu_svm *svm, gpa_t req_gpa, gpa_t resp_
 	if (!sev_snp_guest(kvm))
 		return -EINVAL;
 
-	mutex_lock(&sev->guest_req_mutex);
+	guard(mutex)(&sev->guest_req_mutex);
 
-	if (kvm_read_guest(kvm, req_gpa, sev->guest_req_buf, PAGE_SIZE)) {
-		ret = -EIO;
-		goto out_unlock;
-	}
+	if (kvm_read_guest(kvm, req_gpa, sev->guest_req_buf, PAGE_SIZE))
+		return -EIO;
 
 	data.gctx_paddr = __psp_pa(sev->snp_context);
 	data.req_paddr = __psp_pa(sev->guest_req_buf);
@@ -4108,21 +4106,16 @@ static int snp_handle_guest_req(struct vcpu_svm *svm, gpa_t req_gpa, gpa_t resp_
 	 */
 	ret = sev_issue_cmd(kvm, SEV_CMD_SNP_GUEST_REQUEST, &data, &fw_err);
 	if (ret && !fw_err)
-		goto out_unlock;
+		return ret;
 
-	if (kvm_write_guest(kvm, resp_gpa, sev->guest_resp_buf, PAGE_SIZE)) {
-		ret = -EIO;
-		goto out_unlock;
-	}
+	if (kvm_write_guest(kvm, resp_gpa, sev->guest_resp_buf, PAGE_SIZE))
+		return -EIO;
 
 	/* No action is requested *from KVM* if there was a firmware error. */
 	svm_vmgexit_no_action(svm, SNP_GUEST_ERR(0, fw_err));
 
-	ret = 1; /* resume guest */
-
-out_unlock:
-	mutex_unlock(&sev->guest_req_mutex);
-	return ret;
+	/* resume guest */
+	return 1;
 }
 
 static int snp_handle_ext_guest_req(struct vcpu_svm *svm, gpa_t req_gpa, gpa_t resp_gpa)
-- 
2.51.0

Simplify the lock management in sev_asid_new() by using a scoped mutex
guard, which automatically releases the mutex when the goto to the error
path is followed.

Signed-off-by: Carlos López
---
 arch/x86/kvm/svm/sev.c | 24 ++++++++++--------------
 1 file changed, 10 insertions(+), 14 deletions(-)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 5f46b7f073b0..95430d456a6f 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -232,24 +232,20 @@ static int sev_asid_new(struct kvm_sev_info *sev, unsigned long vm_type)
 		return ret;
 	}
 
-	mutex_lock(&sev_bitmap_lock);
-
+	scoped_guard(mutex, &sev_bitmap_lock) {
 again:
-	asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid);
-	if (asid > max_asid) {
-		if (retry && __sev_recycle_asids(min_asid, max_asid)) {
-			retry = false;
-			goto again;
+		asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid);
+		if (asid > max_asid) {
+			if (retry && __sev_recycle_asids(min_asid, max_asid)) {
+				retry = false;
+				goto again;
+			}
+			ret = -EBUSY;
+			goto e_uncharge;
 		}
-		mutex_unlock(&sev_bitmap_lock);
-		ret = -EBUSY;
-		goto e_uncharge;
+		__set_bit(asid, sev_asid_bitmap);
 	}
 
-	__set_bit(asid, sev_asid_bitmap);
-
-	mutex_unlock(&sev_bitmap_lock);
-
 	sev->asid = asid;
 	return 0;
 e_uncharge:
-- 
2.51.0
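The sev_asid_new() change relies on one more property of the scope-based guards: when a goto transfers control out of the guarded block, the cleanup handler still runs, so sev_bitmap_lock is dropped before the e_uncharge error path executes. A hypothetical userspace sketch of that behaviour, again with invented names and a pthread mutex in place of the kernel primitives:

/*
 * Illustrative userspace sketch of the scoped_guard() behaviour used in
 * sev_asid_new(): a goto that leaves the guarded scope still triggers the
 * cleanup handler, so the lock is released before the error label runs.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t bitmap_lock = PTHREAD_MUTEX_INITIALIZER;

static void unlock_cleanup(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
	puts("lock dropped on scope exit");
}

static int asid_new(int fail)
{
	int ret = 0;

	{	/* stand-in for scoped_guard(mutex, &bitmap_lock) { ... } */
		pthread_mutex_lock(&bitmap_lock);
		pthread_mutex_t *guard __attribute__((cleanup(unlock_cleanup))) = &bitmap_lock;

		if (fail) {
			ret = -1;
			goto e_uncharge;	/* leaves the scope: cleanup runs first */
		}
		puts("asid allocated under the lock");
	}

	return ret;

e_uncharge:
	puts("undoing the charge, lock already released");
	return ret;
}

int main(void)
{
	asid_new(0);
	asid_new(1);
	return 0;
}

Both GCC and Clang run the cleanup handler when the variable's scope is exited this way, which is the mechanism the kernel's scoped_guard() builds on.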