Now that page_ext holds a count of IOMMU mappings, we can use it to
assert that any page being allocated or freed is not mapped in the
IOMMU.

The sanitizer does not protect against the page being mapped or
unmapped during this window (while the page is free); however, that
is less harmful, as the page is not used by the kernel.

Signed-off-by: Mostafa Saleh
---
 drivers/iommu/iommu-debug-pagealloc.c | 23 +++++++++++++++++++++++
 include/linux/iommu-debug-pagealloc.h | 14 ++++++++++++++
 include/linux/mm.h                    |  5 +++++
 3 files changed, 42 insertions(+)

diff --git a/drivers/iommu/iommu-debug-pagealloc.c b/drivers/iommu/iommu-debug-pagealloc.c
index 4639cf9518e6..424e00cd103b 100644
--- a/drivers/iommu/iommu-debug-pagealloc.c
+++ b/drivers/iommu/iommu-debug-pagealloc.c
@@ -9,6 +9,7 @@
 #include
 #include
 #include
+#include <linux/page_owner.h>
 
 #include "iommu-priv.h"
 
@@ -73,6 +74,28 @@ static size_t iommu_debug_page_size(struct iommu_domain *domain)
 	return 1UL << __ffs(domain->pgsize_bitmap);
 }
 
+static bool iommu_debug_page_count(const struct page *page)
+{
+	unsigned int ref;
+	struct page_ext *page_ext = page_ext_get(page);
+	struct iommu_debug_metadata *d = get_iommu_data(page_ext);
+
+	ref = atomic_read(&d->ref);
+	page_ext_put(page_ext);
+	return ref != 0;
+}
+
+void __iommu_debug_check_unmapped(const struct page *page, int numpages)
+{
+	while (numpages--) {
+		if (WARN_ON(iommu_debug_page_count(page))) {
+			pr_warn("iommu: Detected page leak!\n");
+			dump_page_owner(page);
+		}
+		page++;
+	}
+}
+
 void __iommu_debug_map(struct iommu_domain *domain, phys_addr_t phys, size_t size)
 {
 	size_t off, end;
diff --git a/include/linux/iommu-debug-pagealloc.h b/include/linux/iommu-debug-pagealloc.h
index a439d6815ca1..46c3c1f70150 100644
--- a/include/linux/iommu-debug-pagealloc.h
+++ b/include/linux/iommu-debug-pagealloc.h
@@ -13,6 +13,20 @@ DECLARE_STATIC_KEY_FALSE(iommu_debug_initialized);
 
 extern struct page_ext_operations page_iommu_debug_ops;
 
+void __iommu_debug_check_unmapped(const struct page *page, int numpages);
+
+static inline void iommu_debug_check_unmapped(const struct page *page, int numpages)
+{
+	if (static_branch_unlikely(&iommu_debug_initialized))
+		__iommu_debug_check_unmapped(page, numpages);
+}
+
+#else
+static inline void iommu_debug_check_unmapped(const struct page *page,
+					      int numpages)
+{
+}
+
 #endif /* CONFIG_IOMMU_DEBUG_PAGEALLOC */
 
 #endif /* __LINUX_IOMMU_DEBUG_PAGEALLOC_H */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 7a1819c20643..3763b71a7d3e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -36,6 +36,7 @@
 #include
 #include
 #include
+#include <linux/iommu-debug-pagealloc.h>
 
 struct mempolicy;
 struct anon_vma;
@@ -4133,12 +4134,16 @@ extern void __kernel_map_pages(struct page *page, int numpages, int enable);
 #ifdef CONFIG_DEBUG_PAGEALLOC
 static inline void debug_pagealloc_map_pages(struct page *page, int numpages)
 {
+	iommu_debug_check_unmapped(page, numpages);
+
 	if (debug_pagealloc_enabled_static())
 		__kernel_map_pages(page, numpages, 1);
 }
 
 static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages)
 {
+	iommu_debug_check_unmapped(page, numpages);
+
 	if (debug_pagealloc_enabled_static())
 		__kernel_map_pages(page, numpages, 0);
 }
-- 
2.52.0.223.gf5cc29aaa4-goog
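
Note for readers outside the tree: below is a minimal userspace sketch of
the checking logic this patch adds, only as an illustration. It is not
kernel code, and every name in it (iommu_refs, dbg_map, dbg_unmap,
dbg_check_unmapped, NR_PAGES) is hypothetical. In the real patch the
counter lives in page_ext and the check is driven from
debug_pagealloc_map_pages()/debug_pagealloc_unmap_pages(), as in the
hunks above.

#include <stdatomic.h>
#include <stdio.h>

#define NR_PAGES 16

/* One counter per "page": how many live IOMMU mappings reference it. */
static atomic_uint iommu_refs[NR_PAGES];

/* Model of an IOMMU map of one page: bump its reference count. */
static void dbg_map(unsigned int pfn)
{
	atomic_fetch_add(&iommu_refs[pfn], 1);
}

/* Model of an IOMMU unmap of one page: drop its reference count. */
static void dbg_unmap(unsigned int pfn)
{
	atomic_fetch_sub(&iommu_refs[pfn], 1);
}

/*
 * Model of the allocator hook: when a range of pages is handed out or
 * taken back, warn about any page that is still mapped in the IOMMU.
 */
static void dbg_check_unmapped(unsigned int pfn, int numpages)
{
	while (numpages--) {
		if (atomic_load(&iommu_refs[pfn]))
			fprintf(stderr, "page %u leaked into the IOMMU!\n", pfn);
		pfn++;
	}
}

int main(void)
{
	dbg_map(3);
	dbg_check_unmapped(0, NR_PAGES);	/* warns about page 3 */
	dbg_unmap(3);
	dbg_check_unmapped(0, NR_PAGES);	/* silent: nothing mapped */
	return 0;
}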