Implement gmap_protect_asce_top_level(), which was a stub.

This function was a stub due to cross dependencies with other patches.

Signed-off-by: Claudio Imbrenda
---
 arch/s390/kvm/gmap.c | 73 ++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 71 insertions(+), 2 deletions(-)

diff --git a/arch/s390/kvm/gmap.c b/arch/s390/kvm/gmap.c
index 29ce8df697dd..cbb777e940d1 100644
--- a/arch/s390/kvm/gmap.c
+++ b/arch/s390/kvm/gmap.c
@@ -22,6 +22,7 @@
 #include "dat.h"
 #include "gmap.h"
 #include "kvm-s390.h"
+#include "faultin.h"
 
 static inline bool kvm_s390_is_in_sie(struct kvm_vcpu *vcpu)
 {
@@ -988,10 +989,78 @@ static struct gmap *gmap_find_shadow(struct gmap *parent, union asce asce, int e
 	return NULL;
 }
 
+#define CRST_TABLE_PAGES (_CRST_TABLE_SIZE / PAGE_SIZE)
+struct gmap_protect_asce_top_level {
+	unsigned long seq;
+	struct guest_fault f[CRST_TABLE_PAGES];
+};
+
+static inline int __gmap_protect_asce_top_level(struct kvm_s390_mmu_cache *mc, struct gmap *sg,
+						struct gmap_protect_asce_top_level *context)
+{
+	int rc, i;
+
+	guard(write_lock)(&sg->kvm->mmu_lock);
+
+	if (kvm_s390_array_needs_retry_safe(sg->kvm, context->seq, context->f))
+		return -EAGAIN;
+
+	scoped_guard(spinlock, &sg->parent->children_lock) {
+		for (i = 0; i < CRST_TABLE_PAGES; i++) {
+			rc = gmap_protect_rmap(mc, sg, context->f[i].gfn, 0, context->f[i].pfn,
+					       TABLE_TYPE_REGION1 + 1, context->f[i].writable);
+			if (rc)
+				return rc;
+		}
+		sg->initialized = true;
+		gmap_add_child(sg->parent, sg);
+	}
+
+	kvm_s390_release_faultin_array(sg->kvm, context->f, false);
+	return 0;
+}
+
+static inline int _gmap_protect_asce_top_level(struct kvm_s390_mmu_cache *mc, struct gmap *sg,
+					       struct gmap_protect_asce_top_level *context)
+{
+	int rc;
+
+	if (kvm_s390_array_needs_retry_unsafe(sg->kvm, context->seq, context->f))
+		return -EAGAIN;
+	do {
+		rc = kvm_s390_mmu_cache_topup(mc);
+		if (rc)
+			return rc;
+		rc = radix_tree_preload(GFP_KERNEL);
+		if (rc)
+			return rc;
+		rc = __gmap_protect_asce_top_level(mc, sg, context);
+		radix_tree_preload_end();
+	} while (rc == -ENOMEM);
+
+	return rc;
+}
+
 static int gmap_protect_asce_top_level(struct kvm_s390_mmu_cache *mc, struct gmap *sg)
 {
-	KVM_BUG_ON(1, sg->kvm);
-	return -EINVAL;
+	struct gmap_protect_asce_top_level context = {};
+	union asce asce = sg->guest_asce;
+	int rc;
+
+	KVM_BUG_ON(!sg->is_shadow, sg->kvm);
+
+	context.seq = sg->kvm->mmu_invalidate_seq;
+	/* Pairs with the smp_wmb() in kvm_mmu_invalidate_end(). */
+	smp_rmb();
+
+	rc = kvm_s390_get_guest_pages(sg->kvm, context.f, asce.rsto, asce.dt + 1, false);
+	if (rc > 0)
+		rc = -EFAULT;
+	if (!rc)
+		rc = _gmap_protect_asce_top_level(mc, sg, &context);
+	if (rc)
+		kvm_s390_release_faultin_array(sg->kvm, context.f, true);
+	return rc;
 }
 
 /**
-- 
2.51.1