Fix the warning:

  BUG: using smp_processor_id() in preemptible [00000000] code: systemd/1
  caller is larch_insn_text_copy+0x40/0xf0

Simply changing it to raw_smp_processor_id() is not enough: if preempt
and CPU hotplug happens after raw_smp_processor_id() but before
stop_machine(), the CPU where raw_smp_processor_id() has run may be
offline when stop_machine() and no CPU will run copy_to_kernel_nofault()
in text_copy_cb().

Thus guard the larch_insn_text_copy() calls with cpus_read_lock() and
change stop_machine() to stop_machine_cpuslocked() to prevent this.

Fixes: 9fbd18cf4c69 ("LoongArch: BPF: Add dynamic code modification support")
Signed-off-by: Xi Ruoyao <xry111@xry111.site>
---
Changes since [v1]:

- Add lockdep_assert_cpus_held() with a comment to prevent people from
  calling larch_insn_text_copy() w/o acquiring the cpus lock in the
  future.

[v1]: https://lore.kernel.org/20260225141404.178823-2-xry111@xry111.site

 arch/loongarch/kernel/inst.c | 10 ++++++++--
 arch/loongarch/net/bpf_jit.c |  6 ++++++
 2 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/arch/loongarch/kernel/inst.c b/arch/loongarch/kernel/inst.c
index bf037f0c6b26..7545ae3c796e 100644
--- a/arch/loongarch/kernel/inst.c
+++ b/arch/loongarch/kernel/inst.c
@@ -263,14 +263,20 @@ int larch_insn_text_copy(void *dst, void *src, size_t len)
 		.dst = dst,
 		.src = src,
 		.len = len,
-		.cpu = smp_processor_id(),
+		.cpu = raw_smp_processor_id(),
 	};
 
+	/*
+	 * Ensure copy.cpu won't be hot removed before stop_machine.  If
+	 * it's removed nobody will really update the text.
+	 */
+	lockdep_assert_cpus_held();
+
 	start = round_down((size_t)dst, PAGE_SIZE);
 	end = round_up((size_t)dst + len, PAGE_SIZE);
 	set_memory_rw(start, (end - start) / PAGE_SIZE);
-	ret = stop_machine(text_copy_cb, &copy, cpu_online_mask);
+	ret = stop_machine_cpuslocked(text_copy_cb, &copy, cpu_online_mask);
 	set_memory_rox(start, (end - start) / PAGE_SIZE);
 
 	return ret;
diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
index 3bd89f55960d..e8e0ad34928c 100644
--- a/arch/loongarch/net/bpf_jit.c
+++ b/arch/loongarch/net/bpf_jit.c
@@ -1379,9 +1379,11 @@ void *bpf_arch_text_copy(void *dst, void *src, size_t len)
 {
 	int ret;
 
+	cpus_read_lock();
 	mutex_lock(&text_mutex);
 	ret = larch_insn_text_copy(dst, src, len);
 	mutex_unlock(&text_mutex);
+	cpus_read_unlock();
 
 	return ret ? ERR_PTR(-EINVAL) : dst;
 }
@@ -1429,10 +1431,12 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t,
 	if (ret)
 		return ret;
 
+	cpus_read_lock();
 	mutex_lock(&text_mutex);
 	if (memcmp(ip, new_insns, LOONGARCH_LONG_JUMP_NBYTES))
 		ret = larch_insn_text_copy(ip, new_insns, LOONGARCH_LONG_JUMP_NBYTES);
 	mutex_unlock(&text_mutex);
+	cpus_read_unlock();
 
 	return ret;
 }
@@ -1450,10 +1454,12 @@ int bpf_arch_text_invalidate(void *dst, size_t len)
 	for (i = 0; i < (len / sizeof(u32)); i++)
 		inst[i] = INSN_BREAK;
 
+	cpus_read_lock();
 	mutex_lock(&text_mutex);
 	if (larch_insn_text_copy(dst, inst, len))
 		ret = -EINVAL;
 	mutex_unlock(&text_mutex);
+	cpus_read_unlock();
 
 	kvfree(inst);
 
-- 
2.53.0