Filtering on pid_tgid is meaningless when the BPF program runs in interrupt context, because the pid_tgid it reads belongs to whichever task happened to be interrupted. To address this, introduce the bpf_in_interrupt kfunc, which allows BPF programs to determine whether they are executing in interrupt context. This enables programs to avoid applying pid_tgid filtering when running in such contexts. Signed-off-by: Leon Hwang --- kernel/bpf/helpers.c | 9 +++++++++ kernel/bpf/verifier.c | 11 +++++++++++ 2 files changed, 20 insertions(+) diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 401b4932cc49f..38991b7b4a9e9 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -3711,6 +3711,14 @@ __bpf_kfunc int bpf_strstr(const char *s1__ign, const char *s2__ign) return bpf_strnstr(s1__ign, s2__ign, XATTR_SIZE_MAX); } +/** + * bpf_in_interrupt - Check whether the CPU is currently in interrupt context + */ +__bpf_kfunc int bpf_in_interrupt(void) +{ + return in_interrupt(); +} + __bpf_kfunc_end_defs(); BTF_KFUNCS_START(generic_btf_ids) @@ -3751,6 +3759,7 @@ BTF_ID_FLAGS(func, bpf_throw) #ifdef CONFIG_BPF_EVENTS BTF_ID_FLAGS(func, bpf_send_signal_task, KF_TRUSTED_ARGS) #endif +BTF_ID_FLAGS(func, bpf_in_interrupt, KF_FASTCALL) BTF_KFUNCS_END(generic_btf_ids) static const struct btf_kfunc_id_set generic_kfunc_set = { diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 5c9dd16b2c56b..e30ecbfc29dad 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -12259,6 +12259,7 @@ enum special_kfunc_type { KF_bpf_res_spin_lock_irqsave, KF_bpf_res_spin_unlock_irqrestore, KF___bpf_trap, + KF_bpf_in_interrupt, }; BTF_ID_LIST(special_kfunc_list) @@ -12327,6 +12328,7 @@ BTF_ID(func, bpf_res_spin_unlock) BTF_ID(func, bpf_res_spin_lock_irqsave) BTF_ID(func, bpf_res_spin_unlock_irqrestore) BTF_ID(func, __bpf_trap) +BTF_ID(func, bpf_in_interrupt) static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta) { @@ -21977,6 +21979,15 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, desc->func_id == 
special_kfunc_list[KF_bpf_rdonly_cast]) { insn_buf[0] = BPF_MOV64_REG(BPF_REG_0, BPF_REG_1); *cnt = 1; + } else if (desc->func_id == special_kfunc_list[KF_bpf_in_interrupt]) { +#if defined(CONFIG_X86_64) && !defined(CONFIG_UML) + insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, (u32)(unsigned long)&__preempt_count); + insn_buf[1] = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0); + insn_buf[2] = BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0); + insn_buf[3] = BPF_ALU32_IMM(BPF_AND, BPF_REG_0, NMI_MASK | HARDIRQ_MASK | + (IS_ENABLED(CONFIG_PREEMPT_RT) ? 0 : SOFTIRQ_MASK)); + *cnt = 4; +#endif } if (env->insn_aux_data[insn_idx].arg_prog) { -- 2.50.1