Now, as page_ext holds a count of the IOMMU mappings, we can use it to
assert that any page being allocated or freed is indeed not mapped in
the IOMMU.

The sanitizer doesn't protect against the page being mapped or unmapped
while the check runs. However, that is less harmful, as the page is not
in use by the kernel at that point.

Signed-off-by: Mostafa Saleh
Tested-by: Qinxin Xia
---
 drivers/iommu/iommu-debug-pagealloc.c | 19 +++++++++++++++++++
 include/linux/iommu-debug-pagealloc.h | 12 ++++++++++++
 include/linux/mm.h                    |  5 +++++
 3 files changed, 36 insertions(+)

diff --git a/drivers/iommu/iommu-debug-pagealloc.c b/drivers/iommu/iommu-debug-pagealloc.c
index 0e14104b971c..5b26c84d3a0e 100644
--- a/drivers/iommu/iommu-debug-pagealloc.c
+++ b/drivers/iommu/iommu-debug-pagealloc.c
@@ -71,6 +71,25 @@ static size_t iommu_debug_page_size(struct iommu_domain *domain)
 	return 1UL << __ffs(domain->pgsize_bitmap);
 }
 
+static unsigned int iommu_debug_page_count(unsigned long phys)
+{
+	unsigned int ref;
+	struct page_ext *page_ext = get_iommu_page_ext(phys);
+	struct iommu_debug_metadata *d = get_iommu_data(page_ext);
+
+	ref = atomic_read(&d->ref);
+	page_ext_put(page_ext);
+	return ref;
+}
+
+void __iommu_debug_check_unmapped(const struct page *page, int numpages)
+{
+	while (numpages--) {
+		WARN_ON(iommu_debug_page_count(page_to_phys(page)));
+		page++;
+	}
+}
+
 void __iommu_debug_map(struct iommu_domain *domain, phys_addr_t phys, size_t size)
 {
 	size_t off;
diff --git a/include/linux/iommu-debug-pagealloc.h b/include/linux/iommu-debug-pagealloc.h
index 180446d6d86f..84110e4ecfaa 100644
--- a/include/linux/iommu-debug-pagealloc.h
+++ b/include/linux/iommu-debug-pagealloc.h
@@ -22,6 +22,7 @@ void __iommu_debug_unmap(struct iommu_domain *domain, unsigned long iova,
 			 size_t size);
 void __iommu_debug_remap(struct iommu_domain *domain, unsigned long iova,
 			 size_t size);
+void __iommu_debug_check_unmapped(const struct page *page, int numpages);
 
 static inline void iommu_debug_map(struct iommu_domain *domain,
 				   phys_addr_t phys, size_t size)
@@ -44,6 +45,12 @@ static inline void iommu_debug_remap(struct iommu_domain *domain,
 		__iommu_debug_remap(domain, iova, size);
 }
 
+static inline void iommu_debug_check_unmapped(const struct page *page, int numpages)
+{
+	if (static_branch_unlikely(&iommu_debug_initialized))
+		__iommu_debug_check_unmapped(page, numpages);
+}
+
 void iommu_debug_init(void);
 
 #else
@@ -66,6 +73,11 @@ static inline void iommu_debug_init(void)
 {
 }
 
+static inline void iommu_debug_check_unmapped(const struct page *page,
+					      int numpages)
+{
+}
+
 #endif /* CONFIG_IOMMU_DEBUG_PAGEALLOC */
 
 #endif /* __LINUX_IOMMU_DEBUG_PAGEALLOC_H */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d16b33bacc32..895a60a49120 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -36,6 +36,7 @@
 #include
 #include
 #include
+#include <linux/iommu-debug-pagealloc.h>
 
 struct mempolicy;
 struct anon_vma;
@@ -3811,12 +3812,16 @@ extern void __kernel_map_pages(struct page *page, int numpages, int enable);
 #ifdef CONFIG_DEBUG_PAGEALLOC
 static inline void debug_pagealloc_map_pages(struct page *page, int numpages)
 {
+	iommu_debug_check_unmapped(page, numpages);
+
 	if (debug_pagealloc_enabled_static())
 		__kernel_map_pages(page, numpages, 1);
 }
 
 static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages)
 {
+	iommu_debug_check_unmapped(page, numpages);
+
 	if (debug_pagealloc_enabled_static())
 		__kernel_map_pages(page, numpages, 0);
 }
-- 
2.51.2.1026.g39e6a42477-goog
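
As a reviewer aid, here is a minimal user-space model (plain C11, not
kernel code) of the invariant this patch enforces: model_map() and
model_unmap() stand in for the page_ext refcount updates done on IOMMU
map/unmap, and model_check_unmapped() mirrors the per-page sweep in
__iommu_debug_check_unmapped(). All model_* names and NR_PAGES are
illustrative and not part of the series.

/* Build: cc -std=c11 model.c -o model */
#include <stdatomic.h>
#include <stdio.h>

#define NR_PAGES 8				/* illustrative pool size */

/* Stands in for the per-page mapping count kept in struct page_ext. */
static atomic_uint iommu_refs[NR_PAGES];

/* Models the count being bumped/dropped on IOMMU map/unmap. */
static void model_map(unsigned int pfn)
{
	atomic_fetch_add(&iommu_refs[pfn], 1);
}

static void model_unmap(unsigned int pfn)
{
	atomic_fetch_sub(&iommu_refs[pfn], 1);
}

/*
 * Models __iommu_debug_check_unmapped(): run when the page allocator
 * hands out or takes back pages; any live IOMMU mapping is a bug.
 */
static void model_check_unmapped(unsigned int pfn, int numpages)
{
	while (numpages--) {
		if (atomic_load(&iommu_refs[pfn]))
			fprintf(stderr, "WARN: page %u still mapped in IOMMU\n", pfn);
		pfn++;
	}
}

int main(void)
{
	model_map(3);
	model_check_unmapped(0, NR_PAGES);	/* warns about page 3 */
	model_unmap(3);
	model_check_unmapped(0, NR_PAGES);	/* silent: all counts are zero */
	return 0;
}

The first sweep warns once for page 3; after the unmap it is silent,
which is the behaviour the hooks added to debug_pagealloc_map_pages()
and debug_pagealloc_unmap_pages() report via WARN_ON() in the kernel.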