Add helper functions for handling multiple guest faults at the same
time. This will be needed for VSIE, where resolving a nested guest
access also requires accessing all the page tables that map it.

Signed-off-by: Claudio Imbrenda
---
An illustrative usage sketch of these helpers follows after the patch.

 arch/s390/kvm/gaccess.h  | 14 ++++++++++
 arch/s390/kvm/kvm-s390.c | 44 +++++++++++++++++++++++++++++++
 arch/s390/kvm/kvm-s390.h | 56 ++++++++++++++++++++++++++++++++++++++++
 3 files changed, 114 insertions(+)

diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 3fde45a151f2..9c82f7460821 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -457,4 +457,18 @@ int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra);
 int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *shadow,
			  unsigned long saddr, unsigned long *datptr);
 
+static inline int __kvm_s390_faultin_read_gpa(struct kvm *kvm, struct guest_fault *f, gpa_t gaddr,
+					      unsigned long *val)
+{
+	phys_addr_t phys_addr;
+	int rc;
+
+	rc = __kvm_s390_faultin_gfn(kvm, f, gpa_to_gfn(gaddr), false);
+	if (!rc) {
+		phys_addr = PFN_PHYS(f->pfn) | offset_in_page(gaddr);
+		*val = *(unsigned long *)phys_to_virt(phys_addr);
+	}
+	return rc;
+}
+
 #endif /* __KVM_S390_GACCESS_H */
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 61aa64886c36..af8a62abec48 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -4858,6 +4858,50 @@ static void kvm_s390_assert_primary_as(struct kvm_vcpu *vcpu)
		current->thread.gmap_int_code, current->thread.gmap_teid.val);
 }
 
+/**
+ * __kvm_s390_faultin_gfn() - fault in and pin a guest address
+ * @kvm: the guest
+ * @guest_fault: will be filled with information on the pin operation
+ * @gfn: guest frame
+ * @wr: if true, indicates a write access
+ *
+ * Fault in and pin a guest address using absolute addressing, without
+ * marking the page as referenced.
+ *
+ * Context: Called with mm->mmap_lock in read mode.
+ *
+ * Return:
+ * * 0 in case of success,
+ * * -EFAULT if reading using the virtual address failed,
+ * * -EINTR if a signal is pending,
+ * * -EAGAIN if FOLL_NOWAIT was specified, but IO is needed,
+ * * PGM_ADDRESSING if the guest address lies outside of guest memory.
+ */
+int __kvm_s390_faultin_gfn(struct kvm *kvm, struct guest_fault *guest_fault, gfn_t gfn, bool wr)
+{
+	struct kvm_memory_slot *slot;
+	kvm_pfn_t pfn;
+	int foll;
+
+	foll = wr ? FOLL_WRITE : 0;
+	slot = gfn_to_memslot(kvm, gfn);
+	pfn = __kvm_faultin_pfn(slot, gfn, foll, &guest_fault->writable, &guest_fault->page);
+	if (is_noslot_pfn(pfn))
+		return PGM_ADDRESSING;
+	if (is_sigpending_pfn(pfn))
+		return -EINTR;
+	if (pfn == KVM_PFN_ERR_NEEDS_IO)
+		return -EAGAIN;
+	if (is_error_pfn(pfn))
+		return -EFAULT;
+
+	guest_fault->pfn = pfn;
+	guest_fault->gfn = gfn;
+	guest_fault->write_attempt = wr;
+	guest_fault->valid = true;
+	return 0;
+}
+
 /*
  * __kvm_s390_handle_dat_fault() - handle a dat fault for the gmap of a vcpu
  * @vcpu: the vCPU whose gmap is to be fixed up
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index c44fe0c3a097..dabcf65f58ff 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -22,6 +22,15 @@
 
 #define KVM_S390_UCONTROL_MEMSLOT (KVM_USER_MEM_SLOTS + 0)
 
+struct guest_fault {
+	gfn_t gfn;		/* Guest frame */
+	kvm_pfn_t pfn;		/* Host PFN */
+	struct page *page;	/* Host page */
+	bool writable;		/* Mapping is writable */
+	bool write_attempt;	/* Write access attempted */
+	bool valid;		/* This entry contains valid data */
+};
+
 static inline void kvm_s390_fpu_store(struct kvm_run *run)
 {
	fpu_stfpc(&run->s.regs.fpc);
@@ -464,12 +473,59 @@ int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rc, u16 *rrc);
 int __kvm_s390_handle_dat_fault(struct kvm_vcpu *vcpu, gfn_t gfn, gpa_t gaddr, unsigned int flags);
 int __kvm_s390_mprotect_many(struct gmap *gmap, gpa_t gpa, u8 npages,
			     unsigned int prot, unsigned long bits);
+int __kvm_s390_faultin_gfn(struct kvm *kvm, struct guest_fault *f, gfn_t gfn, bool wr);
 
 static inline int kvm_s390_handle_dat_fault(struct kvm_vcpu *vcpu, gpa_t gaddr, unsigned int flags)
 {
	return __kvm_s390_handle_dat_fault(vcpu, gpa_to_gfn(gaddr), gaddr, flags);
 }
 
+static inline void release_faultin_multiple(struct kvm *kvm, struct guest_fault *guest_faults,
+					    int n, bool ignore)
+{
+	int i;
+
+	for (i = 0; i < n; i++) {
+		kvm_release_faultin_page(kvm, guest_faults[i].page, ignore,
+					 guest_faults[i].write_attempt);
+		guest_faults[i].page = NULL;
+	}
+}
+
+static inline bool __kvm_s390_multiple_faults_need_retry(struct kvm *kvm, unsigned long seq,
+							 struct guest_fault *guest_faults, int n,
+							 bool unsafe)
+{
+	int i;
+
+	for (i = 0; i < n; i++) {
+		if (!guest_faults[i].valid)
+			continue;
+		if ((unsafe && mmu_invalidate_retry_gfn_unsafe(kvm, seq, guest_faults[i].gfn)) ||
+		    (!unsafe && mmu_invalidate_retry_gfn(kvm, seq, guest_faults[i].gfn))) {
+			release_faultin_multiple(kvm, guest_faults, n, true);
+			return true;
+		}
+	}
+	return false;
+}
+
+static inline int __kvm_s390_faultin_gfn_range(struct kvm *kvm, struct guest_fault *guest_faults,
+					       gfn_t start, int n_pages, bool write_attempt)
+{
+	int i, rc = 0;
+
+	for (i = 0; !rc && i < n_pages; i++)
+		rc = __kvm_s390_faultin_gfn(kvm, guest_faults + i, start + i, write_attempt);
+	return rc;
+}
+
+#define release_faultin_array(kvm, array, ignore) \
+	release_faultin_multiple(kvm, array, ARRAY_SIZE(array), ignore)
+
+#define __kvm_s390_fault_array_needs_retry(kvm, seq, array, unsafe) \
+	__kvm_s390_multiple_faults_need_retry(kvm, seq, array, ARRAY_SIZE(array), unsafe)
+
 /* implemented in diag.c */
 int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);
 
-- 
2.51.0
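
An illustrative usage sketch of the helpers above, modelled on the usual
KVM fault-retry pattern: fault in a range of guest pages under
mmap_lock, check for a concurrent MMU invalidation under mmu_lock, then
release all the pins. The surrounding function name, the array size,
and the exact locking context are assumptions made for illustration;
only struct guest_fault and the helper names come from the patch itself.

	/* Hypothetical caller, for illustration only; not part of the patch */
	static int example_pin_guest_range(struct kvm *kvm, gfn_t start)
	{
		struct guest_fault faults[4] = {};	/* arbitrary example size */
		unsigned long seq;
		int rc;

	retry:
		seq = kvm->mmu_invalidate_seq;
		/* Pairs with the write side of the MMU invalidation sequence */
		smp_rmb();

		/* __kvm_s390_faultin_gfn() expects mmap_lock held for reading */
		mmap_read_lock(kvm->mm);
		rc = __kvm_s390_faultin_gfn_range(kvm, faults, start,
						  ARRAY_SIZE(faults), false);
		mmap_read_unlock(kvm->mm);
		if (rc) {
			/* Drop partial pins; entries never pinned have page == NULL */
			release_faultin_array(kvm, faults, true);
			return rc;
		}

		spin_lock(&kvm->mmu_lock);
		if (__kvm_s390_fault_array_needs_retry(kvm, seq, faults, false)) {
			/* The retry helper has already released all the pins */
			spin_unlock(&kvm->mmu_lock);
			goto retry;
		}
		/* ... use faults[i].pfn while holding mmu_lock ... */
		spin_unlock(&kvm->mmu_lock);

		release_faultin_array(kvm, faults, false);
		return 0;
	}

Note that on a fault-in error the partial pins must be dropped by the
caller, whereas the retry helper releases everything itself before
returning true; the sketch reflects both cases.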