Implement the iommu domain ops for preservation, unpreservation and
restoration of iommu domains for liveupdate. Use the existing page
walker to preserve the ioptdesc of the top_table and the lower tables.
Preserve the top_level also so it can be restored during boot.

Signed-off-by: Samiullah Khawaja
---
 drivers/iommu/generic_pt/iommu_pt.h | 99 +++++++++++++++++++++++++++++
 include/linux/generic_pt/iommu.h    | 10 +++
 2 files changed, 109 insertions(+)

diff --git a/drivers/iommu/generic_pt/iommu_pt.h b/drivers/iommu/generic_pt/iommu_pt.h
index 3327116a441c..0a1adb6312dd 100644
--- a/drivers/iommu/generic_pt/iommu_pt.h
+++ b/drivers/iommu/generic_pt/iommu_pt.h
@@ -921,6 +921,105 @@ int DOMAIN_NS(map_pages)(struct iommu_domain *domain, unsigned long iova,
 }
 EXPORT_SYMBOL_NS_GPL(DOMAIN_NS(map_pages), "GENERIC_PT_IOMMU");
 
+/**
+ * unpreserve() - Unpreserve page tables and other state of a domain.
+ * @domain: Domain to unpreserve
+ * @ser: Serialized domain state (unused here)
+ */
+void DOMAIN_NS(unpreserve)(struct iommu_domain *domain, struct iommu_domain_ser *ser)
+{
+	struct pt_iommu *iommu_table =
+		container_of(domain, struct pt_iommu, domain);
+	struct pt_common *common = common_from_iommu(iommu_table);
+	struct pt_range range = pt_all_range(common);
+	struct pt_iommu_collect_args collect = {
+		.free_list = IOMMU_PAGES_LIST_INIT(collect.free_list),
+	};
+
+	iommu_pages_list_add(&collect.free_list, range.top_table);
+	pt_walk_range(&range, __collect_tables, &collect);
+
+	iommu_unpreserve_pages(&collect.free_list, -1);
+}
+EXPORT_SYMBOL_NS_GPL(DOMAIN_NS(unpreserve), "GENERIC_PT_IOMMU");
+
+/**
+ * preserve() - Preserve page tables and other state of a domain.
+ * @domain: Domain to preserve
+ * @ser: Serialized domain state, written on success
+ *
+ * Returns: 0 on success, -ERRNO on failure.
+ */
+int DOMAIN_NS(preserve)(struct iommu_domain *domain, struct iommu_domain_ser *ser)
+{
+	struct pt_iommu *iommu_table =
+		container_of(domain, struct pt_iommu, domain);
+	struct pt_common *common = common_from_iommu(iommu_table);
+	struct pt_range range = pt_all_range(common);
+	struct pt_iommu_collect_args collect = {
+		.free_list = IOMMU_PAGES_LIST_INIT(collect.free_list),
+	};
+	int ret;
+
+	iommu_pages_list_add(&collect.free_list, range.top_table);
+	pt_walk_range(&range, __collect_tables, &collect);
+
+	ret = iommu_preserve_pages(&collect.free_list);
+	if (ret)
+		return ret;
+
+	ser->top_table = virt_to_phys(range.top_table);
+	ser->top_level = range.top_level;
+
+	return 0;
+}
+EXPORT_SYMBOL_NS_GPL(DOMAIN_NS(preserve), "GENERIC_PT_IOMMU");
+
+static int __restore_tables(struct pt_range *range, void *arg,
+			    unsigned int level, struct pt_table_p *table)
+{
+	struct pt_state pts = pt_init(range, level, table);
+	int ret;
+
+	for_each_pt_level_entry(&pts) {
+		if (pts.type == PT_ENTRY_TABLE) {
+			iommu_restore_page(virt_to_phys(pts.table_lower));
+			ret = pt_descend(&pts, arg, __restore_tables);
+			if (ret)
+				return ret;
+		}
+	}
+	return 0;
+}
+
+/**
+ * restore() - Restore page tables and other state of a domain.
+ * @domain: Domain to restore
+ * @ser: Serialized domain state recorded by preserve()
+ *
+ * Returns: 0 on success, -ERRNO on failure.
+ */
+int DOMAIN_NS(restore)(struct iommu_domain *domain, struct iommu_domain_ser *ser)
+{
+	struct pt_iommu *iommu_table =
+		container_of(domain, struct pt_iommu, domain);
+	struct pt_common *common = common_from_iommu(iommu_table);
+	struct pt_range range = pt_all_range(common);
+
+	iommu_restore_page(ser->top_table);
+
+	/* Free new table */
+	iommu_free_pages(range.top_table);
+
+	/* Set the restored top table */
+	pt_top_set(common, phys_to_virt(ser->top_table), ser->top_level);
+
+	/* Restore all pages */
+	range = pt_all_range(common);
+	return pt_walk_range(&range, __restore_tables, NULL);
+}
+EXPORT_SYMBOL_NS_GPL(DOMAIN_NS(restore), "GENERIC_PT_IOMMU");
+
 struct pt_unmap_args {
 	struct iommu_pages_list free_list;
 	pt_vaddr_t unmapped;
diff --git a/include/linux/generic_pt/iommu.h b/include/linux/generic_pt/iommu.h
index 9eefbb74efd0..b824a8642571 100644
--- a/include/linux/generic_pt/iommu.h
+++ b/include/linux/generic_pt/iommu.h
@@ -13,6 +13,7 @@ struct iommu_iotlb_gather;
 struct pt_iommu_ops;
 struct pt_iommu_driver_ops;
 struct iommu_dirty_bitmap;
+struct iommu_domain_ser;
 
 /**
  * DOC: IOMMU Radix Page Table
@@ -198,6 +199,12 @@ struct pt_iommu_cfg {
 				     unsigned long iova, phys_addr_t paddr,    \
 				     size_t pgsize, size_t pgcount,            \
 				     int prot, gfp_t gfp, size_t *mapped);     \
+	int pt_iommu_##fmt##_preserve(struct iommu_domain *domain,             \
+				      struct iommu_domain_ser *ser);           \
+	void pt_iommu_##fmt##_unpreserve(struct iommu_domain *domain,          \
+					 struct iommu_domain_ser *ser);        \
+	int pt_iommu_##fmt##_restore(struct iommu_domain *domain,              \
+				     struct iommu_domain_ser *ser);            \
 	size_t pt_iommu_##fmt##_unmap_pages(                                   \
 		struct iommu_domain *domain, unsigned long iova,               \
 		size_t pgsize, size_t pgcount,                                 \
@@ -224,6 +231,9 @@ struct pt_iommu_cfg {
 #define IOMMU_PT_DOMAIN_OPS(fmt)                                               \
 	.iova_to_phys = &pt_iommu_##fmt##_iova_to_phys,                        \
 	.map_pages = &pt_iommu_##fmt##_map_pages,                              \
+	.preserve = &pt_iommu_##fmt##_preserve,                                \
+	.unpreserve = &pt_iommu_##fmt##_unpreserve,                            \
+	.restore = &pt_iommu_##fmt##_restore,                                  \
 	.unmap_pages = &pt_iommu_##fmt##_unmap_pages
 #define IOMMU_PT_DIRTY_OPS(fmt)                                                \
 	.read_and_clear_dirty = &pt_iommu_##fmt##_read_and_clear_dirty
-- 
2.53.0.rc2.204.g2597b5adb4-goog