Currently only the hw_tags mode checks whether KASAN is disabled before doing any work. Here add the corresponding conditional checks to the functional paths of the generic and sw_tags modes. This prepares for later adding the kernel parameter kasan=on|off for all KASAN modes.

Signed-off-by: Baoquan He
---
 mm/kasan/generic.c    | 20 ++++++++++++++++++--
 mm/kasan/init.c       |  6 ++++++
 mm/kasan/quarantine.c |  3 +++
 mm/kasan/report.c     |  4 +++-
 mm/kasan/shadow.c     | 23 ++++++++++++++++++++++-
 mm/kasan/sw_tags.c    |  3 +++
 6 files changed, 55 insertions(+), 4 deletions(-)

diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index d54e89f8c3e7..8daea5892754 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -165,6 +165,9 @@ static __always_inline bool check_region_inline(const void *addr,
 						size_t size, bool write,
 						unsigned long ret_ip)
 {
+	if (!kasan_enabled())
+		return true;
+
 	if (!kasan_arch_is_ready())
 		return true;
 
@@ -203,12 +206,13 @@ bool kasan_byte_accessible(const void *addr)
 
 void kasan_cache_shrink(struct kmem_cache *cache)
 {
-	kasan_quarantine_remove_cache(cache);
+	if (kasan_enabled())
+		kasan_quarantine_remove_cache(cache);
 }
 
 void kasan_cache_shutdown(struct kmem_cache *cache)
 {
-	if (!__kmem_cache_empty(cache))
+	if (kasan_enabled() && !__kmem_cache_empty(cache))
 		kasan_quarantine_remove_cache(cache);
 }
 
@@ -228,6 +232,9 @@ void __asan_register_globals(void *ptr, ssize_t size)
 	int i;
 	struct kasan_global *globals = ptr;
 
+	if (!kasan_enabled())
+		return;
+
 	for (i = 0; i < size; i++)
 		register_global(&globals[i]);
 }
@@ -358,6 +365,9 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
 	unsigned int rem_free_meta_size;
 	unsigned int orig_alloc_meta_offset;
 
+	if (!kasan_enabled())
+		return;
+
 	if (!kasan_requires_meta())
 		return;
 
@@ -510,6 +520,9 @@ size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object)
 {
 	struct kasan_cache *info = &cache->kasan_info;
 
+	if (!kasan_enabled())
+		return 0;
+
 	if (!kasan_requires_meta())
 		return 0;
 
@@ -535,6 +548,9 @@ void kasan_record_aux_stack(void *addr)
 	struct kasan_alloc_meta *alloc_meta;
 	void *object;
 
+	if (!kasan_enabled())
+		return;
+
 	if (is_kfence_address(addr) || !slab)
 		return;
 
diff --git a/mm/kasan/init.c b/mm/kasan/init.c
index ced6b29fcf76..43d95f329675 100644
--- a/mm/kasan/init.c
+++ b/mm/kasan/init.c
@@ -449,6 +449,9 @@ void kasan_remove_zero_shadow(void *start, unsigned long size)
 	unsigned long addr, end, next;
 	pgd_t *pgd;
 
+	if (!kasan_enabled())
+		return;
+
 	addr = (unsigned long)kasan_mem_to_shadow(start);
 	end = addr + (size >> KASAN_SHADOW_SCALE_SHIFT);
 
@@ -484,6 +487,9 @@ int kasan_add_zero_shadow(void *start, unsigned long size)
 	int ret;
 	void *shadow_start, *shadow_end;
 
+	if (!kasan_enabled())
+		return 0;
+
 	shadow_start = kasan_mem_to_shadow(start);
 	shadow_end = shadow_start + (size >> KASAN_SHADOW_SCALE_SHIFT);
 
diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c
index 6958aa713c67..a6dc2c3d8a15 100644
--- a/mm/kasan/quarantine.c
+++ b/mm/kasan/quarantine.c
@@ -405,6 +405,9 @@ static int __init kasan_cpu_quarantine_init(void)
 {
 	int ret = 0;
 
+	if (!kasan_enabled())
+		return 0;
+
 	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mm/kasan:online",
 				kasan_cpu_online, kasan_cpu_offline);
 	if (ret < 0)
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 62c01b4527eb..884357fa74ed 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -576,7 +576,9 @@ bool kasan_report(const void *addr, size_t size, bool is_write,
 	unsigned long irq_flags;
 	struct kasan_report_info info;
 
-	if (unlikely(report_suppressed_sw()) || unlikely(!report_enabled())) {
+	if (unlikely(report_suppressed_sw()) ||
+	    unlikely(!report_enabled()) ||
+	    !kasan_enabled()) {
 		ret = false;
 		goto out;
 	}
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index d2c70cd2afb1..637f2d02d2a3 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -125,6 +125,9 @@ void kasan_poison(const void *addr, size_t size, u8 value, bool init)
 {
 	void *shadow_start, *shadow_end;
 
+	if (!kasan_enabled())
+		return;
+
 	if (!kasan_arch_is_ready())
 		return;
 
@@ -150,6 +153,9 @@ EXPORT_SYMBOL_GPL(kasan_poison);
 #ifdef CONFIG_KASAN_GENERIC
 void kasan_poison_last_granule(const void *addr, size_t size)
 {
+	if (!kasan_enabled())
+		return;
+
 	if (!kasan_arch_is_ready())
 		return;
 
@@ -164,6 +170,8 @@ void kasan_unpoison(const void *addr, size_t size, bool init)
 {
 	u8 tag = get_tag(addr);
 
+	if (!kasan_enabled())
+		return;
 	/*
 	 * Perform shadow offset calculation based on untagged address, as
 	 * some of the callers (e.g. kasan_unpoison_new_object) pass tagged
@@ -277,7 +285,8 @@ static int __meminit kasan_mem_notifier(struct notifier_block *nb,
 
 static int __init kasan_memhotplug_init(void)
 {
-	hotplug_memory_notifier(kasan_mem_notifier, DEFAULT_CALLBACK_PRI);
+	if (kasan_enabled())
+		hotplug_memory_notifier(kasan_mem_notifier, DEFAULT_CALLBACK_PRI);
 
 	return 0;
 }
@@ -390,6 +399,9 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
 	unsigned long shadow_start, shadow_end;
 	int ret;
 
+	if (!kasan_enabled())
+		return 0;
+
 	if (!kasan_arch_is_ready())
 		return 0;
 
@@ -560,6 +572,9 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
 	unsigned long region_start, region_end;
 	unsigned long size;
 
+	if (!kasan_enabled())
+		return;
+
 	if (!kasan_arch_is_ready())
 		return;
 
@@ -655,6 +670,9 @@ int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask)
 	size_t shadow_size;
 	unsigned long shadow_start;
 
+	if (!kasan_enabled())
+		return 0;
+
 	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
 	scaled_size = (size + KASAN_GRANULE_SIZE - 1) >>
 			KASAN_SHADOW_SCALE_SHIFT;
@@ -691,6 +709,9 @@ int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask)
 
 void kasan_free_module_shadow(const struct vm_struct *vm)
 {
+	if (!kasan_enabled())
+		return;
+
 	if (IS_ENABLED(CONFIG_UML))
 		return;
 
diff --git a/mm/kasan/sw_tags.c b/mm/kasan/sw_tags.c
index b9382b5b6a37..01f19bc4a326 100644
--- a/mm/kasan/sw_tags.c
+++ b/mm/kasan/sw_tags.c
@@ -78,6 +78,9 @@ bool kasan_check_range(const void *addr, size_t size, bool write,
 	u8 *shadow_first, *shadow_last, *shadow;
 	void *untagged_addr;
 
+	if (!kasan_enabled())
+		return true;
+
 	if (unlikely(size == 0))
 		return true;
 
-- 
2.41.0
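The guard added throughout the patch above is the same early bail-out everywhere: test kasan_enabled() and return a harmless value when KASAN is off. Once the later patches back kasan_flag_enabled with a static key shared by all modes, the disabled case costs only a patched branch rather than a load and compare. A minimal sketch of the idiom; kasan_do_work() is a made-up placeholder for illustration, not a function from this series:

```c
#include <linux/kasan-enabled.h>	/* kasan_enabled() */

/* Hypothetical hot-path helper, shown only to illustrate the bail-out idiom. */
static bool kasan_do_work(const void *addr, size_t size)
{
	/*
	 * With kasan_flag_enabled backed by a static key, this compiles to
	 * a patched branch: a kernel booted with kasan=off falls straight
	 * through without touching shadow memory.
	 */
	if (!kasan_enabled())
		return true;	/* pretend the access is fine, do nothing */

	/* ... real shadow-memory checking would go here ... */
	return true;
}
```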
Move the 'kasan=' parameter handling into the common KASAN code so that it can take effect for the generic and sw_tags modes too. While at it, turn 'kasan_arg' into a bool variable named 'kasan_arg_disabled', and expose 'kasan_flag_enabled' from the common KASAN code as well. This prepares for later adding the kernel parameter kasan=on|off for all KASAN modes.

Signed-off-by: Baoquan He
---
 include/linux/kasan-enabled.h |  4 +++-
 mm/kasan/common.c             | 25 +++++++++++++++++++++++++
 mm/kasan/hw_tags.c            | 35 ++---------------------------------
 3 files changed, 30 insertions(+), 34 deletions(-)

diff --git a/include/linux/kasan-enabled.h b/include/linux/kasan-enabled.h
index 6f612d69ea0c..32f2d19f599f 100644
--- a/include/linux/kasan-enabled.h
+++ b/include/linux/kasan-enabled.h
@@ -4,10 +4,12 @@
 
 #include <linux/static_key.h>
 
-#ifdef CONFIG_KASAN_HW_TAGS
+extern bool kasan_arg_disabled;
 
 DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);
 
+#ifdef CONFIG_KASAN_HW_TAGS
+
 static __always_inline bool kasan_enabled(void)
 {
 	return static_branch_likely(&kasan_flag_enabled);
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 9142964ab9c9..69a848f2a8aa 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -32,6 +32,31 @@
 #include "kasan.h"
 #include "../slab.h"
 
+/*
+ * Whether KASAN is enabled at all.
+ * The value remains false until KASAN is initialized.
+ */
+DEFINE_STATIC_KEY_FALSE(kasan_flag_enabled);
+EXPORT_SYMBOL(kasan_flag_enabled);
+
+bool kasan_arg_disabled __ro_after_init;
+/* kasan=off/on */
+static int __init early_kasan_flag(char *arg)
+{
+	if (!arg)
+		return -EINVAL;
+
+	if (!strcmp(arg, "off"))
+		kasan_arg_disabled = true;
+	else if (!strcmp(arg, "on"))
+		kasan_arg_disabled = false;
+	else
+		return -EINVAL;
+
+	return 0;
+}
+early_param("kasan", early_kasan_flag);
+
 struct slab *kasan_addr_to_slab(const void *addr)
 {
 	if (virt_addr_valid(addr))
diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
index 9a6927394b54..377e9c285a74 100644
--- a/mm/kasan/hw_tags.c
+++ b/mm/kasan/hw_tags.c
@@ -22,12 +22,6 @@
 
 #include "kasan.h"
 
-enum kasan_arg {
-	KASAN_ARG_DEFAULT,
-	KASAN_ARG_OFF,
-	KASAN_ARG_ON,
-};
-
 enum kasan_arg_mode {
 	KASAN_ARG_MODE_DEFAULT,
 	KASAN_ARG_MODE_SYNC,
@@ -41,17 +35,9 @@ enum kasan_arg_vmalloc {
 	KASAN_ARG_VMALLOC_ON,
 };
 
-static enum kasan_arg kasan_arg __ro_after_init;
 static enum kasan_arg_mode kasan_arg_mode __ro_after_init;
 static enum kasan_arg_vmalloc kasan_arg_vmalloc __initdata;
 
-/*
- * Whether KASAN is enabled at all.
- * The value remains false until KASAN is initialized by kasan_init_hw_tags().
- */
-DEFINE_STATIC_KEY_FALSE(kasan_flag_enabled);
-EXPORT_SYMBOL(kasan_flag_enabled);
-
 /*
  * Whether the selected mode is synchronous, asynchronous, or asymmetric.
  * Defaults to KASAN_MODE_SYNC.
@@ -85,23 +71,6 @@ unsigned int kasan_page_alloc_sample_order = PAGE_ALLOC_SAMPLE_ORDER_DEFAULT;
 
 DEFINE_PER_CPU(long, kasan_page_alloc_skip);
 
-/* kasan=off/on */
-static int __init early_kasan_flag(char *arg)
-{
-	if (!arg)
-		return -EINVAL;
-
-	if (!strcmp(arg, "off"))
-		kasan_arg = KASAN_ARG_OFF;
-	else if (!strcmp(arg, "on"))
-		kasan_arg = KASAN_ARG_ON;
-	else
-		return -EINVAL;
-
-	return 0;
-}
-early_param("kasan", early_kasan_flag);
-
 /* kasan.mode=sync/async/asymm */
 static int __init early_kasan_mode(char *arg)
 {
@@ -209,7 +178,7 @@ void kasan_init_hw_tags_cpu(void)
 	 * When this function is called, kasan_flag_enabled is not yet
 	 * set by kasan_init_hw_tags(). Thus, check kasan_arg instead.
	 */
-	if (kasan_arg == KASAN_ARG_OFF)
+	if (kasan_arg_disabled)
 		return;
 
 	/*
@@ -227,7 +196,7 @@ void __init kasan_init_hw_tags(void)
 		return;
 
 	/* If KASAN is disabled via command line, don't initialize it. */
-	if (kasan_arg == KASAN_ARG_OFF)
+	if (kasan_arg_disabled)
 		return;
 
 	switch (kasan_arg_mode) {
-- 
2.41.0
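Two distinct flags now live in common code, and they are usable at different points during boot: kasan_arg_disabled is a plain bool set by the early_param() handler, so it can be consulted before jump labels work, while kasan_flag_enabled is a static key that only becomes true once an architecture's kasan_init() has finished. A sketch of the flow the per-architecture patches below build towards; example_arch_kasan_init() is illustrative, not a function added by this series:

```c
#include <linux/init.h>
#include <linux/kasan-enabled.h>

/* Hypothetical arch hook, sketching when each flag is valid. */
void __init example_arch_kasan_init(void)
{
	/*
	 * kasan=off on the command line sets kasan_arg_disabled via
	 * early_param("kasan", ...); it is a plain bool, so it is safe to
	 * test even before jump_label_init() has run.
	 */
	if (kasan_arg_disabled)
		return;

	/* ... map shadow memory, set up page tables, etc. ... */

	/*
	 * Only from here on may kasan_enabled() return true; the callers in
	 * mm/kasan/ guarded by the previous patch then start doing real work.
	 */
	static_branch_enable(&kasan_flag_enabled);
}
```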
Skip kasan_init_sw_tags() if KASAN is disabled on the kernel command line. And also add code to enable kasan_flag_enabled; this is for later use.

Signed-off-by: Baoquan He
---
 mm/kasan/sw_tags.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/mm/kasan/sw_tags.c b/mm/kasan/sw_tags.c
index 01f19bc4a326..dd963ba4d143 100644
--- a/mm/kasan/sw_tags.c
+++ b/mm/kasan/sw_tags.c
@@ -40,11 +40,17 @@ void __init kasan_init_sw_tags(void)
 {
 	int cpu;
 
+	if (kasan_arg_disabled)
+		return;
+
 	for_each_possible_cpu(cpu)
 		per_cpu(prng_state, cpu) = (u32)get_cycles();
 
 	kasan_init_tags();
 
+	/* KASAN is now initialized, enable it. */
+	static_branch_enable(&kasan_flag_enabled);
+
 	pr_info("KernelAddressSanitizer initialized (sw-tags, stacktrace=%s)\n",
 		str_on_off(kasan_stack_collection_enabled()));
 }
-- 
2.41.0

Skip kasan_init() if KASAN is disabled on the kernel command line. And also add code to enable kasan_flag_enabled; this is for later use.

Here call jump_label_init() early in setup_arch() so that kasan_init() can later enable the static key kasan_flag_enabled. Put jump_label_init() before parse_early_param(), as other architectures do.

Signed-off-by: Baoquan He
---
 arch/arm/kernel/setup.c  | 6 ++++++
 arch/arm/mm/kasan_init.c | 6 ++++++
 2 files changed, 12 insertions(+)

diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 0bfd66c7ada0..453a47a4c715 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -1135,6 +1135,12 @@ void __init setup_arch(char **cmdline_p)
 	early_fixmap_init();
 	early_ioremap_init();
 
+	/*
+	 * Initialise the static keys early as they may be enabled by the
+	 * kasan_init() or early parameters.
+	 */
+	jump_label_init();
+
 	parse_early_param();
 
 #ifdef CONFIG_MMU
diff --git a/arch/arm/mm/kasan_init.c b/arch/arm/mm/kasan_init.c
index 111d4f703136..c764e1b9c9c5 100644
--- a/arch/arm/mm/kasan_init.c
+++ b/arch/arm/mm/kasan_init.c
@@ -212,6 +212,8 @@ void __init kasan_init(void)
 	phys_addr_t pa_start, pa_end;
 	u64 i;
 
+	if (kasan_arg_disabled)
+		return;
 	/*
 	 * We are going to perform proper setup of shadow memory.
 	 *
@@ -300,6 +302,10 @@ void __init kasan_init(void)
 	local_flush_tlb_all();
 
 	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
+
+	/* KASAN is now initialized, enable it. */
+	static_branch_enable(&kasan_flag_enabled);
+
 	pr_info("Kernel address sanitizer initialized\n");
 	init_task.kasan_depth = 0;
 }
-- 
2.41.0

Skip kasan_init() if KASAN is disabled on the kernel command line. And also add code to enable kasan_flag_enabled; this is for later use. Also skip kasan_populate_early_vm_area_shadow() if KASAN is disabled.

Signed-off-by: Baoquan He
---
 arch/arm64/mm/kasan_init.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index d541ce45daeb..0e4ffe3f5d0e 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -384,6 +384,9 @@ void __init kasan_populate_early_vm_area_shadow(void *start, unsigned long size)
 {
 	unsigned long shadow_start, shadow_end;
 
+	if (!kasan_enabled())
+		return;
+
 	if (!is_vmalloc_or_module_addr(start))
 		return;
 
@@ -397,6 +400,9 @@ void __init kasan_populate_early_vm_area_shadow(void *start, unsigned long size)
 
 void __init kasan_init(void)
 {
+	if (kasan_arg_disabled)
+		return;
+
 	kasan_init_shadow();
 	kasan_init_depth();
 #if defined(CONFIG_KASAN_GENERIC)
@@ -405,6 +411,7 @@ void __init kasan_init(void)
 	 * Software and Hardware Tag-Based modes still require
 	 * kasan_init_sw_tags() and kasan_init_hw_tags() correspondingly.
 	 */
+	static_branch_enable(&kasan_flag_enabled);
 	pr_info("KernelAddressSanitizer initialized (generic)\n");
 #endif
 }
-- 
2.41.0
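The architecture patches all rely on the ordering that the arm change above makes explicit: static keys must be initialized before anything can flip them, and the command line must be parsed before kasan_init() looks at kasan_arg_disabled. A condensed sketch of the expected boot ordering; the surrounding calls vary per architecture and example_setup_arch() is purely illustrative:

```c
#include <linux/init.h>
#include <linux/jump_label.h>

void __init example_setup_arch(char **cmdline_p)	/* hypothetical, per-arch details omitted */
{
	/* ... early fixmap/ioremap-style setup ... */

	jump_label_init();	/* static keys are patchable from here on */

	parse_early_param();	/* runs early_param("kasan", ...), sets kasan_arg_disabled */

	/* ... memory and page-table setup ... */

	kasan_init();		/* bails out on kasan_arg_disabled, else flips kasan_flag_enabled */
}
```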
Skip kasan_init() if KASAN is disabled on the kernel command line. And also add code to enable kasan_flag_enabled; this is for later use.

Signed-off-by: Baoquan He
---
 arch/loongarch/mm/kasan_init.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/arch/loongarch/mm/kasan_init.c b/arch/loongarch/mm/kasan_init.c
index d2681272d8f0..0c32eee6910f 100644
--- a/arch/loongarch/mm/kasan_init.c
+++ b/arch/loongarch/mm/kasan_init.c
@@ -267,6 +267,8 @@ void __init kasan_init(void)
 	u64 i;
 	phys_addr_t pa_start, pa_end;
 
+	if (kasan_arg_disabled)
+		return;
 	/*
 	 * If PGDIR_SIZE is too large for cpu_vabits, KASAN_SHADOW_END will
 	 * overflow UINTPTR_MAX and then looks like a user space address.
@@ -327,6 +329,9 @@ void __init kasan_init(void)
 	csr_write64(__pa_symbol(swapper_pg_dir), LOONGARCH_CSR_PGDH);
 	local_flush_tlb_all();
 
+	/* KASAN is now initialized, enable it. */
+	static_branch_enable(&kasan_flag_enabled);
+
 	/* At this point kasan is fully initialized. Enable error messages */
 	init_task.kasan_depth = 0;
 	pr_info("KernelAddressSanitizer initialized.\n");
-- 
2.41.0

Skip kasan_init() if KASAN is disabled on the kernel command line; this covers 32bit, book3s/64 and book3e/64. And also add code to enable kasan_flag_enabled; this is for later use.

Signed-off-by: Baoquan He
---
 arch/powerpc/mm/kasan/init_32.c        | 8 +++++++-
 arch/powerpc/mm/kasan/init_book3e_64.c | 6 ++++++
 arch/powerpc/mm/kasan/init_book3s_64.c | 6 ++++++
 3 files changed, 19 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/mm/kasan/init_32.c b/arch/powerpc/mm/kasan/init_32.c
index 03666d790a53..b0c465f3fbf5 100644
--- a/arch/powerpc/mm/kasan/init_32.c
+++ b/arch/powerpc/mm/kasan/init_32.c
@@ -141,6 +141,9 @@ void __init kasan_init(void)
 	u64 i;
 	int ret;
 
+	if (kasan_arg_disabled)
+		return;
+
 	for_each_mem_range(i, &base, &end) {
 		phys_addr_t top = min(end, total_lowmem);
 
@@ -163,6 +166,9 @@ void __init kasan_init(void)
 
 	clear_page(kasan_early_shadow_page);
 
+	/* KASAN is now initialized, enable it. */
+	static_branch_enable(&kasan_flag_enabled);
+
 	/* At this point kasan is fully initialized. Enable error messages */
 	init_task.kasan_depth = 0;
 	pr_info("KASAN init done\n");
@@ -170,7 +176,7 @@ void __init kasan_init(void)
 
 void __init kasan_late_init(void)
 {
-	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
+	if (IS_ENABLED(CONFIG_KASAN_VMALLOC) && kasan_enabled())
 		kasan_unmap_early_shadow_vmalloc();
 }
 
diff --git a/arch/powerpc/mm/kasan/init_book3e_64.c b/arch/powerpc/mm/kasan/init_book3e_64.c
index 60c78aac0f63..1e1c10467a2b 100644
--- a/arch/powerpc/mm/kasan/init_book3e_64.c
+++ b/arch/powerpc/mm/kasan/init_book3e_64.c
@@ -111,6 +111,9 @@ void __init kasan_init(void)
 	u64 i;
 	pte_t zero_pte = pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL_RO);
 
+	if (kasan_arg_disabled)
+		return;
+
 	for_each_mem_range(i, &start, &end)
 		kasan_init_phys_region(phys_to_virt(start), phys_to_virt(end));
 
@@ -125,6 +128,9 @@ void __init kasan_init(void)
 
 	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
 
+	/* KASAN is now initialized, enable it. */
+	static_branch_enable(&kasan_flag_enabled);
+
 	/* Enable error messages */
 	init_task.kasan_depth = 0;
 	pr_info("KASAN init done\n");
diff --git a/arch/powerpc/mm/kasan/init_book3s_64.c b/arch/powerpc/mm/kasan/init_book3s_64.c
index 7d959544c077..9c5cf2354c8b 100644
--- a/arch/powerpc/mm/kasan/init_book3s_64.c
+++ b/arch/powerpc/mm/kasan/init_book3s_64.c
@@ -56,6 +56,9 @@ void __init kasan_init(void)
 	u64 i;
 	pte_t zero_pte = pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL);
 
+	if (kasan_arg_disabled)
+		return;
+
 	if (!early_radix_enabled()) {
 		pr_warn("KASAN not enabled as it requires radix!");
 		return;
 	}
@@ -94,6 +97,9 @@ void __init kasan_init(void)
 
 	static_branch_inc(&powerpc_kasan_enabled_key);
 
+	/* KASAN is now initialized, enable it. */
+	static_branch_enable(&kasan_flag_enabled);
+
 	/* Enable error messages */
 	init_task.kasan_depth = 0;
 	pr_info("KASAN init done\n");
-- 
2.41.0

Skip kasan_init() if KASAN is disabled on the kernel command line. And also add code to enable kasan_flag_enabled; this is for later use.

Signed-off-by: Baoquan He
---
 arch/riscv/mm/kasan_init.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
index 41c635d6aca4..ac3ac227c765 100644
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -485,6 +485,9 @@ void __init kasan_init(void)
 	phys_addr_t p_start, p_end;
 	u64 i;
 
+	if (kasan_arg_disabled)
+		return;
+
 	create_tmp_mapping();
 	csr_write(CSR_SATP, PFN_DOWN(__pa(tmp_pg_dir)) | satp_mode);
 
@@ -531,6 +534,9 @@ void __init kasan_init(void)
 	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
 	init_task.kasan_depth = 0;
 
+	/* KASAN is now initialized, enable it. */
+	static_branch_enable(&kasan_flag_enabled);
+
 	csr_write(CSR_SATP, PFN_DOWN(__pa(swapper_pg_dir)) | satp_mode);
 	local_flush_tlb_all();
 }
-- 
2.41.0

Skip kasan_init() if KASAN is disabled on the kernel command line. And also add code to enable kasan_flag_enabled; this is for later use.

Signed-off-by: Baoquan He
---
 arch/x86/mm/kasan_init_64.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 0539efd0d216..0f2f9311e9df 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -343,6 +343,9 @@ void __init kasan_init(void)
 	unsigned long shadow_cea_begin, shadow_cea_per_cpu_begin, shadow_cea_end;
 	int i;
 
+	if (kasan_arg_disabled)
+		return;
+
 	memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));
 
 	/*
@@ -450,6 +453,9 @@ void __init kasan_init(void)
 	/* Flush TLBs again to be sure that write protection applied. */
 	__flush_tlb_all();
 
+	/* KASAN is now initialized, enable it. */
+	static_branch_enable(&kasan_flag_enabled);
+
 	init_task.kasan_depth = 0;
 	pr_info("KernelAddressSanitizer initialized\n");
 }
-- 
2.41.0

Skip kasan_init() if KASAN is disabled on the kernel command line. And also add code to enable kasan_flag_enabled; this is for later use.

Here call jump_label_init() early in setup_arch() so that kasan_init() can later enable the static key kasan_flag_enabled. Put jump_label_init() before parse_early_param(), as other architectures do.
Signed-off-by: Baoquan He
---
 arch/xtensa/kernel/setup.c  | 1 +
 arch/xtensa/mm/kasan_init.c | 6 ++++++
 2 files changed, 7 insertions(+)

diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index f72e280363be..aabeb23f41fa 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -352,6 +352,7 @@ void __init setup_arch(char **cmdline_p)
 	mem_reserve(__pa(_SecondaryResetVector_text_start),
 		    __pa(_SecondaryResetVector_text_end));
 #endif
+	jump_label_init();
 	parse_early_param();
 	bootmem_init();
 	kasan_init();
diff --git a/arch/xtensa/mm/kasan_init.c b/arch/xtensa/mm/kasan_init.c
index f39c4d83173a..4a7b77f47225 100644
--- a/arch/xtensa/mm/kasan_init.c
+++ b/arch/xtensa/mm/kasan_init.c
@@ -70,6 +70,9 @@ void __init kasan_init(void)
 {
 	int i;
 
+	if (kasan_arg_disabled)
+		return;
+
 	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
 		     KASAN_SHADOW_START - (KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT));
 	BUILD_BUG_ON(VMALLOC_START < KASAN_START_VADDR);
@@ -92,6 +95,9 @@ void __init kasan_init(void)
 	local_flush_tlb_all();
 	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
 
+	/* KASAN is now initialized, enable it. */
+	static_branch_enable(&kasan_flag_enabled);
+
 	/* At this point kasan is fully initialized. Enable error messages. */
 	current->kasan_depth = 0;
 	pr_info("KernelAddressSanitizer initialized\n");
-- 
2.41.0

Skip kasan_init() if KASAN is disabled on the kernel command line. And also add code to enable kasan_flag_enabled; this is for later use. Since kasan_init() is called before main(), enabling kasan_flag_enabled is done in arch_mm_preinit(), which runs after the jump_label_init() invocation.

Signed-off-by: Baoquan He
---
 arch/um/kernel/mem.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 76bec7de81b5..392a23d4ef96 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -26,6 +26,9 @@ int kasan_um_is_ready;
 
 void kasan_init(void)
 {
+
+	if (kasan_arg_disabled)
+		return;
 	/*
 	 * kasan_map_memory will map all of the required address space and
 	 * the host machine will allocate physical memory as necessary.
@@ -58,6 +61,9 @@ static unsigned long brk_end;
 
 void __init arch_mm_preinit(void)
 {
+	/* Safe to call after jump_label_init(). Enables KASAN. */
+	static_branch_enable(&kasan_flag_enabled);
+
 	/* clear the zero-page */
 	memset(empty_zero_page, 0, PAGE_SIZE);
 
-- 
2.41.0

Now that everything is ready, setting kasan=off can disable KASAN for all three modes.

Signed-off-by: Baoquan He
---
 include/linux/kasan-enabled.h | 18 ++++++++----------
 1 file changed, 8 insertions(+), 10 deletions(-)

diff --git a/include/linux/kasan-enabled.h b/include/linux/kasan-enabled.h
index 32f2d19f599f..21b6233f829c 100644
--- a/include/linux/kasan-enabled.h
+++ b/include/linux/kasan-enabled.h
@@ -4,34 +4,32 @@
 
 #include <linux/static_key.h>
 
+#ifdef CONFIG_KASAN
 extern bool kasan_arg_disabled;
 
 DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);
 
-#ifdef CONFIG_KASAN_HW_TAGS
-
 static __always_inline bool kasan_enabled(void)
 {
 	return static_branch_likely(&kasan_flag_enabled);
 }
+#else /* CONFIG_KASAN */
+static inline bool kasan_enabled(void)
+{
+	return false;
+}
+#endif
 
+#ifdef CONFIG_KASAN_HW_TAGS
 static inline bool kasan_hw_tags_enabled(void)
 {
 	return kasan_enabled();
 }
-
 #else /* CONFIG_KASAN_HW_TAGS */
-
-static inline bool kasan_enabled(void)
-{
-	return IS_ENABLED(CONFIG_KASAN);
-}
-
 static inline bool kasan_hw_tags_enabled(void)
 {
 	return false;
 }
-
 #endif /* CONFIG_KASAN_HW_TAGS */
 
 #endif /* LINUX_KASAN_ENABLED_H */
-- 
2.41.0
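With the final header change, kasan_enabled() has the same shape for every configuration: a static-key test under CONFIG_KASAN and a constant false otherwise, so callers outside mm/kasan/ need no #ifdef. Booting such a kernel with kasan=off should leave the static key unset, so every guard added in this series falls through. A small sketch of a consumer in the spirit of the powerpc kasan_late_init() change earlier in the series; example_late_init() is illustrative and not part of the series:

```c
#include <linux/init.h>
#include <linux/kasan-enabled.h>

/* Hypothetical late-init hook guarding optional KASAN-only work. */
static int __init example_late_init(void)
{
	/* Constant false when CONFIG_KASAN=n, a patched branch otherwise. */
	if (!kasan_enabled())
		return 0;

	/* ... tear down early shadow mappings, register notifiers, etc. ... */
	return 0;
}
late_initcall(example_late_init);
```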