In the main patch of this set we need to pin and unpin multiple
consecutive guest-2 pages in guest-1. As these might not be consecutive
in guest-1, it is necessary to iterate over all pages and store the
guest and host addresses for later use.

As the new functions use the existing {,un}pin_guest_page() functions,
these are moved up unchanged in the file to avoid having to resort to
forward declarations later on.

Signed-off-by: Christoph Schlameuss
---
 arch/s390/kvm/vsie.c | 91 +++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 65 insertions(+), 26 deletions(-)

diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index 3d602bbd1f70b7bd8ddc2c54d43027dc37a6e032..e86fef0fa3919668902c766813991572c2311b09 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -67,6 +67,11 @@ struct vsie_page {
 	__u8 fac[S390_ARCH_FAC_LIST_SIZE_BYTE];	/* 0x0800 */
 };
 
+struct kvm_address_pair {
+	gpa_t gpa;
+	hpa_t hpa;
+};
+
 /**
  * gmap_shadow_valid() - check if a shadow guest address space matches the
  * given properties and is still valid
@@ -159,6 +164,66 @@ static void write_scao(struct kvm_s390_sie_block *scb, hpa_t hpa)
 	scb->scaol = (u32)(u64)hpa;
 }
 
+/*
+ * Pin the guest page given by gpa and set hpa to the pinned host address.
+ * Will always be pinned writable.
+ *
+ * Returns: - 0 on success
+ *          - -EINVAL if the gpa is not valid guest storage
+ */
+static int pin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)
+{
+	struct page *page;
+
+	page = gfn_to_page(kvm, gpa_to_gfn(gpa));
+	if (!page)
+		return -EINVAL;
+	*hpa = (hpa_t)page_to_phys(page) + (gpa & ~PAGE_MASK);
+	return 0;
+}
+
+/* Unpins a page previously pinned via pin_guest_page, marking it as dirty. */
+static void unpin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t hpa)
+{
+	kvm_release_page_dirty(pfn_to_page(hpa >> PAGE_SHIFT));
+	/* mark the page always as dirty for migration */
+	mark_page_dirty(kvm, gpa_to_gfn(gpa));
+}
+
+/* unpin multiple guest pages pinned with pin_guest_pages() */
+static void unpin_guest_pages(struct kvm *kvm, struct kvm_address_pair *addr, unsigned int nr_pages)
+{
+	int i;
+
+	for (i = 0; i < nr_pages; i++) {
+		unpin_guest_page(kvm, addr[i].gpa, addr[i].hpa);
+		addr[i].gpa = 0;
+		addr[i].hpa = 0;
+	}
+}
+
+/* pin nr_pages consecutive guest pages */
+static int pin_guest_pages(struct kvm *kvm, gpa_t gpa, unsigned int nr_pages,
+			   struct kvm_address_pair *addr)
+{
+	hpa_t hpa;
+	int i, rc;
+
+	/* the guest pages may not be mapped continuously, so pin each page */
+	for (i = 0; i < nr_pages; i++) {
+		rc = pin_guest_page(kvm, gpa + PAGE_SIZE * i, &hpa);
+		if (rc)
+			goto err;
+		addr[i].gpa = gpa + PAGE_SIZE * i;
+		addr[i].hpa = hpa;
+	}
+	return i;
+
+err:
+	unpin_guest_pages(kvm, addr, i);
+	return -EFAULT;
+}
+
 /* copy the updated intervention request bits into the shadow scb */
 static void update_intervention_requests(struct vsie_page *vsie_page)
 {
@@ -718,32 +783,6 @@ static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 	return rc;
 }
 
-/*
- * Pin the guest page given by gpa and set hpa to the pinned host address.
- * Will always be pinned writable.
- *
- * Returns: - 0 on success
- *          - -EINVAL if the gpa is not valid guest storage
- */
-static int pin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)
-{
-	struct page *page;
-
-	page = gfn_to_page(kvm, gpa_to_gfn(gpa));
-	if (!page)
-		return -EINVAL;
-	*hpa = (hpa_t)page_to_phys(page) + (gpa & ~PAGE_MASK);
-	return 0;
-}
-
-/* Unpins a page previously pinned via pin_guest_page, marking it as dirty. */
-static void unpin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t hpa)
-{
-	kvm_release_page_dirty(pfn_to_page(hpa >> PAGE_SHIFT));
-	/* mark the page always as dirty for migration */
-	mark_page_dirty(kvm, gpa_to_gfn(gpa));
-}
-
 /* unpin all blocks previously pinned by pin_blocks(), marking them dirty */
 static void unpin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 {

--
2.51.1
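For illustration only, not part of the patch: a minimal sketch of how a
caller could use the new helpers, assuming it lives in vsie.c below
pin_guest_pages(). The function name pin_block_example, the
VSIE_EX_PAGES count, and the pr_debug() output are made up; the real
caller is only introduced by the main patch of this set.

#define VSIE_EX_PAGES 4	/* hypothetical number of consecutive guest-2 pages */

static int pin_block_example(struct kvm *kvm, gpa_t gpa)
{
	struct kvm_address_pair pages[VSIE_EX_PAGES];
	int nr, i;

	/*
	 * Pin VSIE_EX_PAGES consecutive guest-2 pages starting at gpa.
	 * On failure pin_guest_pages() has already unpinned everything it
	 * pinned so far and returns -EFAULT, so no cleanup is needed here;
	 * on success it returns the number of pinned pages.
	 */
	nr = pin_guest_pages(kvm, gpa, VSIE_EX_PAGES, pages);
	if (nr < 0)
		return nr;

	/* the gpas are consecutive, but the hpas need not be */
	for (i = 0; i < nr; i++)
		pr_debug("pinned gpa 0x%llx at hpa 0x%llx\n",
			 pages[i].gpa, pages[i].hpa);

	/* unpin and mark dirty once the pages are no longer needed */
	unpin_guest_pages(kvm, pages, nr);
	return 0;
}

The per-page gpa/hpa pairs are exactly why kvm_address_pair exists:
since the host backing pages may be scattered, each guest page has to
be accessed and later unpinned through its own stored hpa.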