For an address to be canonical it has to have its top bits equal to each other. The number of bits depends on the paging level, and whether they are supposed to be ones or zeroes depends on whether the address points to kernel or user space. With Linear Address Masking (LAM) enabled, the definition of linear address canonicality is modified: not all of the previously required bits need to be equal anymore — only the first and the last bit of the previously-equal range must match. So, for example, a 5-level paging kernel address needs to have bits [63] and [56] set. Add separate __canonical_address() implementation for CONFIG_KASAN_SW_TAGS since it's the only thing right now that enables LAM for kernel addresses (LAM_SUP bit in CR4). Signed-off-by: Maciej Wieczor-Retman --- Changelog v4: - Add patch to the series. arch/x86/include/asm/page.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h index 15c95e96fd15..97de2878f0b3 100644 --- a/arch/x86/include/asm/page.h +++ b/arch/x86/include/asm/page.h @@ -82,10 +82,20 @@ static __always_inline void *pfn_to_kaddr(unsigned long pfn) return __va(pfn << PAGE_SHIFT); } +/* + * CONFIG_KASAN_SW_TAGS requires LAM which changes the canonicality checks. + */ +#ifdef CONFIG_KASAN_SW_TAGS +static __always_inline u64 __canonical_address(u64 vaddr, u8 vaddr_bits) +{ + return (vaddr | BIT_ULL(63) | BIT_ULL(vaddr_bits - 1)); +} +#else static __always_inline u64 __canonical_address(u64 vaddr, u8 vaddr_bits) { return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits); } +#endif static __always_inline u64 __is_canonical_address(u64 vaddr, u8 vaddr_bits) { -- 2.50.1