From: Lu Baolu

The cap_ndoms() helper calculates the maximum available domain ID from
the value of the capability register, which can be inefficient if
called repeatedly. Cache the maximum supported domain ID in the
max_domain_id field during initialization to avoid redundant calls to
cap_ndoms() throughout the IOMMU driver.

No functional change intended.

Signed-off-by: Lu Baolu
Signed-off-by: Dan Williams
---
 drivers/iommu/intel/iommu.h |  1 +
 drivers/iommu/intel/dmar.c  |  1 +
 drivers/iommu/intel/iommu.c | 10 +++++-----
 3 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index 3056583d7f56..66c3aa549fd4 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -724,6 +724,7 @@ struct intel_iommu {
 	/* mutex to protect domain_ida */
 	struct mutex did_lock;
 	struct ida domain_ida;	/* domain id allocator */
+	unsigned long max_domain_id;
 	unsigned long *copied_tables; /* bitmap of copied tables */
 	spinlock_t lock; /* protect context, domain ids */
 	struct root_entry *root_entry; /* virtual address */
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index ec975c73cfe6..a54934c0536f 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -1099,6 +1099,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
 	spin_lock_init(&iommu->lock);
 	ida_init(&iommu->domain_ida);
 	mutex_init(&iommu->did_lock);
+	iommu->max_domain_id = cap_ndoms(iommu->cap);
 
 	ver = readl(iommu->reg + DMAR_VER_REG);
 	pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index e236c7ec221f..848b300da63e 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -1356,7 +1356,7 @@ int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
 	}
 
 	num = ida_alloc_range(&iommu->domain_ida, IDA_START_DID,
-			      cap_ndoms(iommu->cap) - 1, GFP_KERNEL);
+			      iommu->max_domain_id - 1, GFP_KERNEL);
 	if (num < 0) {
 		pr_err("%s: No free domain ids\n", iommu->name);
 		goto err_unlock;
@@ -1420,7 +1420,7 @@ static void copied_context_tear_down(struct intel_iommu *iommu,
 	did_old = context_domain_id(context);
 	context_clear_entry(context);
 
-	if (did_old < cap_ndoms(iommu->cap)) {
+	if (did_old < iommu->max_domain_id) {
 		iommu->flush.flush_context(iommu, did_old,
 					   PCI_DEVID(bus, devfn),
 					   DMA_CCMD_MASK_NOBIT,
@@ -1986,7 +1986,7 @@ static int copy_context_table(struct intel_iommu *iommu,
 			continue;
 
 		did = context_domain_id(&ce);
-		if (did >= 0 && did < cap_ndoms(iommu->cap))
+		if (did >= 0 && did < iommu->max_domain_id)
 			ida_alloc_range(&iommu->domain_ida, did, did, GFP_KERNEL);
 
 		set_context_copied(iommu, bus, devfn);
@@ -2902,7 +2902,7 @@ static ssize_t domains_supported_show(struct device *dev,
 				      struct device_attribute *attr, char *buf)
 {
 	struct intel_iommu *iommu = dev_to_intel_iommu(dev);
 
-	return sysfs_emit(buf, "%ld\n", cap_ndoms(iommu->cap));
+	return sysfs_emit(buf, "%ld\n", iommu->max_domain_id);
 }
 static DEVICE_ATTR_RO(domains_supported);
@@ -2913,7 +2913,7 @@ static ssize_t domains_used_show(struct device *dev,
 	unsigned int count = 0;
 	int id;
 
-	for (id = 0; id < cap_ndoms(iommu->cap); id++)
+	for (id = 0; id < iommu->max_domain_id; id++)
 		if (ida_exists(&iommu->domain_ida, id))
 			count++;
 
-- 
2.25.1
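
For reference, cap_ndoms() is a macro in drivers/iommu/intel/iommu.h
that decodes the Number of Domains (ND) field from the low bits of the
capability register, so the cached value is fixed once the hardware has
been probed. A sketch of the computation, assuming the VT-d encoding of
2^(4 + 2*ND) supported domain IDs (quoted from memory; the exact macro
text in the header may differ):

	/* ND = bits 2:0 of the capability register; 2^(4 + 2*ND) domain IDs */
	#define cap_ndoms(c)	(((unsigned long)1) << (4 + 2 * ((c) & 0x7)))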