When backing a guest page with a large page, check that the alignment of the guest page matches the alignment of the host physical page backing it within the large page. Also check that the memslot is large enough to fit the large page. Those checks are currently not needed, because memslots are guaranteed to be 1M-aligned, but this will change. Signed-off-by: Claudio Imbrenda --- arch/s390/kvm/faultin.c | 2 +- arch/s390/kvm/gmap.c | 32 ++++++++++++++++++++++++++------ arch/s390/kvm/gmap.h | 3 ++- 3 files changed, 29 insertions(+), 8 deletions(-) diff --git a/arch/s390/kvm/faultin.c b/arch/s390/kvm/faultin.c index e37cd18200f5..ddf0ca71f374 100644 --- a/arch/s390/kvm/faultin.c +++ b/arch/s390/kvm/faultin.c @@ -109,7 +109,7 @@ int kvm_s390_faultin_gfn(struct kvm_vcpu *vcpu, struct kvm *kvm, struct guest_fa scoped_guard(read_lock, &kvm->mmu_lock) { if (!mmu_invalidate_retry_gfn(kvm, inv_seq, f->gfn)) { f->valid = true; - rc = gmap_link(mc, kvm->arch.gmap, f); + rc = gmap_link(mc, kvm->arch.gmap, f, slot); kvm_release_faultin_page(kvm, f->page, !!rc, f->write_attempt); f->page = NULL; } diff --git a/arch/s390/kvm/gmap.c b/arch/s390/kvm/gmap.c index ef0c6ebfdde2..e3c1b070a11d 100644 --- a/arch/s390/kvm/gmap.c +++ b/arch/s390/kvm/gmap.c @@ -613,17 +613,37 @@ int gmap_try_fixup_minor(struct gmap *gmap, struct guest_fault *fault) return rc; } -static inline bool gmap_2g_allowed(struct gmap *gmap, gfn_t gfn) +static inline bool gmap_2g_allowed(struct gmap *gmap, struct guest_fault *f, + struct kvm_memory_slot *slot) { return false; } -static inline bool gmap_1m_allowed(struct gmap *gmap, gfn_t gfn) +/** + * gmap_1m_allowed() - Check whether a 1M hugepage is allowed. + * @gmap: The gmap of the guest. + * @f: Describes the fault that is being resolved. + * @slot: The memslot the faulting address belongs to. 
+ * + * The function checks whether the GMAP_FLAG_ALLOW_HPAGE_1M flag is set for + * @gmap, whether the offset of the address in the 1M virtual frame is the + * same as the offset in the physical 1M frame, and finally whether the whole + * 1M page would fit in the given memslot. + * + * Return: true if a 1M hugepage is allowed to back the faulting address, false + * otherwise. + */ +static inline bool gmap_1m_allowed(struct gmap *gmap, struct guest_fault *f, + struct kvm_memory_slot *slot) { - return test_bit(GMAP_FLAG_ALLOW_HPAGE_1M, &gmap->flags); + return test_bit(GMAP_FLAG_ALLOW_HPAGE_1M, &gmap->flags) && + !((f->gfn ^ f->pfn) & ~_SEGMENT_FR_MASK) && + slot->base_gfn <= ALIGN_DOWN(f->gfn, _PAGES_PER_SEGMENT) && + slot->base_gfn + slot->npages >= ALIGN(f->gfn + 1, _PAGES_PER_SEGMENT); } -int gmap_link(struct kvm_s390_mmu_cache *mc, struct gmap *gmap, struct guest_fault *f) +int gmap_link(struct kvm_s390_mmu_cache *mc, struct gmap *gmap, struct guest_fault *f, + struct kvm_memory_slot *slot) { unsigned int order; int rc, level; @@ -633,9 +653,9 @@ int gmap_link(struct kvm_s390_mmu_cache *mc, struct gmap *gmap, struct guest_fau level = TABLE_TYPE_PAGE_TABLE; if (f->page) { order = folio_order(page_folio(f->page)); - if (order >= get_order(_REGION3_SIZE) && gmap_2g_allowed(gmap, f->gfn)) + if (order >= get_order(_REGION3_SIZE) && gmap_2g_allowed(gmap, f, slot)) level = TABLE_TYPE_REGION3; - else if (order >= get_order(_SEGMENT_SIZE) && gmap_1m_allowed(gmap, f->gfn)) + else if (order >= get_order(_SEGMENT_SIZE) && gmap_1m_allowed(gmap, f, slot)) level = TABLE_TYPE_SEGMENT; } rc = dat_link(mc, gmap->asce, level, uses_skeys(gmap), f); diff --git a/arch/s390/kvm/gmap.h b/arch/s390/kvm/gmap.h index ccb5cd751e31..a2f74587ddbf 100644 --- a/arch/s390/kvm/gmap.h +++ b/arch/s390/kvm/gmap.h @@ -90,7 +90,8 @@ struct gmap *gmap_new(struct kvm *kvm, gfn_t limit); struct gmap *gmap_new_child(struct gmap *parent, gfn_t limit); void gmap_remove_child(struct gmap *child); void 
gmap_dispose(struct gmap *gmap); -int gmap_link(struct kvm_s390_mmu_cache *mc, struct gmap *gmap, struct guest_fault *fault); +int gmap_link(struct kvm_s390_mmu_cache *mc, struct gmap *gmap, struct guest_fault *fault, + struct kvm_memory_slot *slot); void gmap_sync_dirty_log(struct gmap *gmap, gfn_t start, gfn_t end); int gmap_set_limit(struct gmap *gmap, gfn_t limit); int gmap_ucas_translate(struct kvm_s390_mmu_cache *mc, struct gmap *gmap, gpa_t *gaddr); -- 2.53.0