Add the implementation of device and IOMMU preservation in a separate
file, and set the device and IOMMU preserve/unpreserve ops in struct
iommu_ops.

During a normal shutdown the IOMMU translation is disabled. Since the
root table is preserved across a live update, it has to be cleaned up
instead: the context entries of devices that are not preserved need to
be cleared.

Signed-off-by: Samiullah Khawaja
---
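Note (not part of the patch itself): intel_iommu_preserve() records the
unit's register base as the serialization token and the physical address
of the preserved root table. Below is a minimal restore-side sketch of
how the next kernel could consume these fields, assuming the generic
live update core hands the matching iommu_ser back to the driver; the
helper and its call site are hypothetical, only the struct fields come
from this patch:

	static int intel_iommu_restore_root_table(struct intel_iommu *iommu,
						  struct iommu_ser *ser)
	{
		/* The token identifies the unit by its register base. */
		if (ser->type != IOMMU_INTEL || ser->token != iommu->reg_phys)
			return -ENODEV;

		/* Reuse the preserved root table instead of allocating a new one. */
		iommu->root_entry = phys_to_virt(ser->intel.root_table);
		return 0;
	}
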
 drivers/iommu/intel/Makefile     |   1 +
 drivers/iommu/intel/iommu.c      |  47 ++++++++++-
 drivers/iommu/intel/iommu.h      |  27 +++++++
 drivers/iommu/intel/liveupdate.c | 134 +++++++++++++++++++++++++++++++
 4 files changed, 205 insertions(+), 4 deletions(-)
 create mode 100644 drivers/iommu/intel/liveupdate.c

diff --git a/drivers/iommu/intel/Makefile b/drivers/iommu/intel/Makefile
index ada651c4a01b..d38fc101bc35 100644
--- a/drivers/iommu/intel/Makefile
+++ b/drivers/iommu/intel/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += debugfs.o
 obj-$(CONFIG_INTEL_IOMMU_SVM) += svm.o
 obj-$(CONFIG_IRQ_REMAP) += irq_remapping.o
 obj-$(CONFIG_INTEL_IOMMU_PERF_EVENTS) += perfmon.o
+obj-$(CONFIG_IOMMU_LIVEUPDATE) += liveupdate.o
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 134302fbcd92..c95de93fb72f 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -16,6 +16,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -52,6 +53,8 @@ static int rwbf_quirk;
 
 #define rwbf_required(iommu) (rwbf_quirk || cap_rwbf((iommu)->cap))
 
+static bool __maybe_clean_unpreserved_context_entries(struct intel_iommu *iommu);
+
 /*
  * set to 1 to panic kernel if can't successfully enable VT-d
  * (used when kernel is launched w/ TXT)
@@ -60,8 +63,6 @@ static int force_on = 0;
 static int intel_iommu_tboot_noforce;
 static int no_platform_optin;
 
-#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
-
 /*
  * Take a root_entry and return the Lower Context Table Pointer (LCTP)
  * if marked present.
@@ -2378,8 +2379,10 @@ void intel_iommu_shutdown(void)
 		/* Disable PMRs explicitly here. */
 		iommu_disable_protect_mem_regions(iommu);
 
-		/* Make sure the IOMMUs are switched off */
-		iommu_disable_translation(iommu);
+		if (!__maybe_clean_unpreserved_context_entries(iommu)) {
+			/* Make sure the IOMMUs are switched off */
+			iommu_disable_translation(iommu);
+		}
 	}
 }
 
@@ -2902,6 +2905,38 @@ static const struct iommu_dirty_ops intel_second_stage_dirty_ops = {
 	.set_dirty_tracking = intel_iommu_set_dirty_tracking,
 };
 
+#ifdef CONFIG_IOMMU_LIVEUPDATE
+static bool __maybe_clean_unpreserved_context_entries(struct intel_iommu *iommu)
+{
+	struct device_domain_info *info;
+	struct pci_dev *pdev = NULL;
+
+	if (!iommu->iommu.outgoing_preserved_state)
+		return false;
+
+	for_each_pci_dev(pdev) {
+		info = dev_iommu_priv_get(&pdev->dev);
+		if (!info)
+			continue;
+
+		if (info->iommu != iommu)
+			continue;
+
+		if (dev_iommu_preserved_state(&pdev->dev))
+			continue;
+
+		domain_context_clear(info);
+	}
+
+	return true;
+}
+#else
+static bool __maybe_clean_unpreserved_context_entries(struct intel_iommu *iommu)
+{
+	return false;
+}
+#endif
+
 static struct iommu_domain *
 intel_iommu_domain_alloc_second_stage(struct device *dev,
				       struct intel_iommu *iommu, u32 flags)
@@ -3925,6 +3960,10 @@ const struct iommu_ops intel_iommu_ops = {
 	.is_attach_deferred = intel_iommu_is_attach_deferred,
 	.def_domain_type = device_def_domain_type,
 	.page_response = intel_iommu_page_response,
+	.preserve_device = intel_iommu_preserve_device,
+	.unpreserve_device = intel_iommu_unpreserve_device,
+	.preserve = intel_iommu_preserve,
+	.unpreserve = intel_iommu_unpreserve,
 };
 
 static void quirk_iommu_igfx(struct pci_dev *dev)
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index 25c5e22096d4..70032e86437d 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -557,6 +557,8 @@ struct root_entry {
 	u64 hi;
 };
 
+#define ROOT_ENTRY_NR (VTD_PAGE_SIZE / sizeof(struct root_entry))
+
 /*
  * low 64 bits:
  * 0: present
@@ -1276,6 +1278,31 @@ static inline int iopf_for_domain_replace(struct iommu_domain *new,
 	return 0;
 }
 
+#ifdef CONFIG_IOMMU_LIVEUPDATE
+int intel_iommu_preserve_device(struct device *dev, struct device_ser *device_ser);
+void intel_iommu_unpreserve_device(struct device *dev, struct device_ser *device_ser);
+int intel_iommu_preserve(struct iommu_device *iommu, struct iommu_ser *iommu_ser);
+void intel_iommu_unpreserve(struct iommu_device *iommu, struct iommu_ser *iommu_ser);
+#else
+static inline int intel_iommu_preserve_device(struct device *dev, struct device_ser *device_ser)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline void intel_iommu_unpreserve_device(struct device *dev, struct device_ser *device_ser)
+{
+}
+
+static inline int intel_iommu_preserve(struct iommu_device *iommu, struct iommu_ser *iommu_ser)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline void intel_iommu_unpreserve(struct iommu_device *iommu, struct iommu_ser *iommu_ser)
+{
+}
+#endif
+
 #ifdef CONFIG_INTEL_IOMMU_SVM
 void intel_svm_check(struct intel_iommu *iommu);
 struct iommu_domain *intel_svm_domain_alloc(struct device *dev,
diff --git a/drivers/iommu/intel/liveupdate.c b/drivers/iommu/intel/liveupdate.c
new file mode 100644
index 000000000000..82ba1daf1711
--- /dev/null
+++ b/drivers/iommu/intel/liveupdate.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Copyright (C) 2025, Google LLC
+ * Author: Samiullah Khawaja
+ */
+
+#define pr_fmt(fmt) "iommu: liveupdate: " fmt
+
+#include
+#include
+#include
+#include
+#include
+
+#include "iommu.h"
+#include "../iommu-pages.h"
+
+static void unpreserve_iommu_context(struct intel_iommu *iommu, int end)
+{
+	struct context_entry *context;
+	int i;
+
+	if (end < 0)
+		end = ROOT_ENTRY_NR;
+
+	for (i = 0; i < end; i++) {
+		context = iommu_context_addr(iommu, i, 0, 0);
+		if (context)
+			iommu_unpreserve_page(context);
+
+		if (!sm_supported(iommu))
+			continue;
+
+		context = iommu_context_addr(iommu, i, 0x80, 0);
+		if (context)
+			iommu_unpreserve_page(context);
+	}
+}
+
+static int preserve_iommu_context(struct intel_iommu *iommu)
+{
+	struct context_entry *context;
+	int ret;
+	int i;
+
+	for (i = 0; i < ROOT_ENTRY_NR; i++) {
+		context = iommu_context_addr(iommu, i, 0, 0);
+		if (context) {
+			ret = iommu_preserve_page(context);
+			if (ret)
+				goto error;
+		}
+
+		if (!sm_supported(iommu))
+			continue;
+
+		context = iommu_context_addr(iommu, i, 0x80, 0);
+		if (context) {
+			ret = iommu_preserve_page(context);
+			if (ret)
+				goto error_sm;
+		}
+	}
+
+	return 0;
+
+error_sm:
+	context = iommu_context_addr(iommu, i, 0, 0);
+	iommu_unpreserve_page(context);
+error:
+	unpreserve_iommu_context(iommu, i);
+	return ret;
+}
+
+int intel_iommu_preserve_device(struct device *dev, struct device_ser *device_ser)
+{
+	struct device_domain_info *info = dev_iommu_priv_get(dev);
+
+	if (!dev_is_pci(dev))
+		return -EOPNOTSUPP;
+
+	if (!info)
+		return -EINVAL;
+
+	device_ser->domain_iommu_ser.did = domain_id_iommu(info->domain, info->iommu);
+	return 0;
+}
+
+void intel_iommu_unpreserve_device(struct device *dev, struct device_ser *device_ser)
+{
+}
+
+int intel_iommu_preserve(struct iommu_device *iommu_dev, struct iommu_ser *ser)
+{
+	struct intel_iommu *iommu;
+	int ret;
+
+	iommu = container_of(iommu_dev, struct intel_iommu, iommu);
+
+	spin_lock(&iommu->lock);
+	ret = preserve_iommu_context(iommu);
+	if (ret)
+		goto err;
+
+	ret = iommu_preserve_page(iommu->root_entry);
+	if (ret) {
+		unpreserve_iommu_context(iommu, -1);
+		goto err;
+	}
+
+	ser->intel.phys_addr = iommu->reg_phys;
+	ser->intel.root_table = __pa(iommu->root_entry);
+	ser->type = IOMMU_INTEL;
+	ser->token = ser->intel.phys_addr;
+	spin_unlock(&iommu->lock);
+
+	return 0;
+err:
+	spin_unlock(&iommu->lock);
+	return ret;
+}
+
+void intel_iommu_unpreserve(struct iommu_device *iommu_dev, struct iommu_ser *iommu_ser)
+{
+	struct intel_iommu *iommu;
+
+	iommu = container_of(iommu_dev, struct intel_iommu, iommu);
+
+	spin_lock(&iommu->lock);
+	unpreserve_iommu_context(iommu, -1);
+	iommu_unpreserve_page(iommu->root_entry);
+	spin_unlock(&iommu->lock);
+}
-- 
2.53.0.rc2.204.g2597b5adb4-goog