Add page table management functions to be used for KVM guest (gmap) page tables.

This patch adds the boilerplate and functions for the allocation and
deallocation of DAT tables.

Signed-off-by: Claudio Imbrenda
---
 arch/s390/kvm/Makefile     |  1 +
 arch/s390/kvm/dat.c        | 91 ++++++++++++++++++++++++++++++++++++++
 arch/s390/kvm/dat.h        |  4 ++
 arch/s390/mm/page-states.c |  1 +
 4 files changed, 97 insertions(+)
 create mode 100644 arch/s390/kvm/dat.c

diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile
index 9a723c48b05a..84315d2f75fb 100644
--- a/arch/s390/kvm/Makefile
+++ b/arch/s390/kvm/Makefile
@@ -9,6 +9,7 @@
 ccflags-y := -Ivirt/kvm -Iarch/s390/kvm
 
 kvm-y += kvm-s390.o intercept.o interrupt.o priv.o sigp.o
 kvm-y += diag.o gaccess.o guestdbg.o vsie.o pv.o gmap-vsie.o
+kvm-y += dat.o
 kvm-$(CONFIG_VFIO_PCI_ZDEV_KVM) += pci.o
 
 obj-$(CONFIG_KVM) += kvm.o
diff --git a/arch/s390/kvm/dat.c b/arch/s390/kvm/dat.c
new file mode 100644
index 000000000000..326be78adcda
--- /dev/null
+++ b/arch/s390/kvm/dat.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KVM guest address space mapping code
+ *
+ * Copyright IBM Corp.
2007, 2020, 2024
+ Author(s): Claudio Imbrenda
+ Martin Schwidefsky
+ David Hildenbrand
+ Janosch Frank
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include "dat.h"
+
+/* NOTE(review): the #include targets above were lost when this patch was
+ * flattened in transit; restore them from the original submission. */
+
+/*
+ * Allocate one page to be used as a guest (gmap) page table, without
+ * initializing the entries.  The backing page is allocated with
+ * GFP_ATOMIC and marked as a DAT table page via __arch_set_page_dat().
+ * Returns the virtual address of the new table, or NULL if the
+ * allocation failed.
+ */
+static inline struct page_table *dat_alloc_pt_noinit(void)
+{
+	struct page *page;
+	void *virt;
+
+	page = alloc_pages(GFP_ATOMIC, 0);
+	if (!page)
+		return NULL;
+
+	virt = page_to_virt(page);
+	__arch_set_page_dat(virt, 1);
+	return virt;
+}
+
+/*
+ * Allocate a page table and initialize it.  @pte_bits and @pgste_bits
+ * are forwarded to dat_init_page_table() (declared elsewhere) to seed
+ * the PTEs and PGSTEs.  Returns NULL on allocation failure.
+ */
+struct page_table *dat_alloc_pt(unsigned long pte_bits, unsigned long pgste_bits)
+{
+	struct page_table *res;
+
+	res = dat_alloc_pt_noinit();
+	if (res)
+		dat_init_page_table(res, pte_bits, pgste_bits);
+	return res;
+}
+
+/*
+ * Allocate an uninitialized CRST (region/segment) table: a compound
+ * allocation of 1 << CRST_ALLOC_ORDER pages, all marked as DAT table
+ * pages.  Returns NULL if the GFP_ATOMIC allocation fails.
+ */
+static inline struct crst_table *dat_alloc_crst_noinit(void)
+{
+	struct page *page;
+	void *virt;
+
+	page = alloc_pages(GFP_ATOMIC | __GFP_COMP, CRST_ALLOC_ORDER);
+	if (!page)
+		return NULL;
+	virt = page_to_virt(page);
+	__arch_set_page_dat(virt, 1UL << CRST_ALLOC_ORDER);
+	return virt;
+}
+
+/*
+ * Allocate a CRST table with every entry initialized to @init via
+ * crst_table_init() (defined elsewhere).  Returns NULL on allocation
+ * failure.
+ */
+struct crst_table *dat_alloc_crst(unsigned long init)
+{
+	struct crst_table *res;
+
+	res = dat_alloc_crst_noinit();
+	if (res)
+		crst_table_init((void *)res, init);
+	return res;
+}
+
+/*
+ * Recursively free a tree of DAT tables, starting at @table.
+ *
+ * Entries that are invalid (h.i) or map a large frame (h.fc) have no
+ * lower-level table attached and are skipped.  Non-segment entries are
+ * descended into recursively; for segment (pmd) entries the attached
+ * page table is freed only when @owns_ptes is true (presumably false
+ * when the page tables are owned by another structure -- confirm
+ * against callers).
+ */
+void dat_free_level(struct crst_table *table, bool owns_ptes)
+{
+	unsigned int i;
+
+	for (i = 0; i < _CRST_ENTRIES; i++) {
+		if (table->crstes[i].h.fc || table->crstes[i].h.i)
+			continue;
+		if (!is_pmd(table->crstes[i]))
+			dat_free_level(dereference_crste(table->crstes[i]), owns_ptes);
+		else if (owns_ptes)
+			dat_free_pt(dereference_pmd(table->crstes[i].pmd));
+	}
+	dat_free_crst(table);
+}
diff --git a/arch/s390/kvm/dat.h b/arch/s390/kvm/dat.h
index 1e355239247b..5056cfa02619 100644
--- a/arch/s390/kvm/dat.h
+++ b/arch/s390/kvm/dat.h
@@ -385,6 +385,10 @@ static inline union crste _crste_fc1(kvm_pfn_t pfn, int tt, bool w, bool d)
 	return res;
 }
 
+void dat_free_level(struct crst_table *table, bool owns_ptes);
+struct page_table
*dat_alloc_pt(unsigned long pte_bits, unsigned long pgste_bits);
+struct crst_table *dat_alloc_crst(unsigned long init);
+
 static inline struct crst_table *crste_table_start(union crste *crstep)
 {
 	return (struct crst_table *)ALIGN_DOWN((unsigned long)crstep, _CRST_TABLE_SIZE);
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
index 01f9b39e65f5..5bee173db72e 100644
--- a/arch/s390/mm/page-states.c
+++ b/arch/s390/mm/page-states.c
@@ -13,6 +13,7 @@
 #include
 
 int __bootdata_preserved(cmma_flag);
+EXPORT_SYMBOL(cmma_flag);
 
 void arch_free_page(struct page *page, int order)
 {
-- 
2.51.0