OpenWRT users reported a regression on ARMv6 devices after updating to the
latest HEAD, where the tcpdump filter:

tcpdump -i mon1 \
    "not wlan addr3 3c37121a2b3c and not wlan addr2 184ecbca2a3a \
    and not wlan addr2 14130b4d3f47 and not wlan addr2 f0f61cf440b7 \
    and not wlan addr3 a84b4dedf471 and not wlan addr3 d022be17e1d7 \
    and not wlan addr3 5c497967208b and not wlan addr2 706655784d5b"

fails with the warning: "Kernel filter failed: No error information"
when using the config:

# CONFIG_BPF_JIT_ALWAYS_ON is not set
CONFIG_BPF_JIT_DEFAULT_ON=y

The issue arises because of two commits:

1. "bpf: Fix array bounds error with may_goto" changed the default runtime
   to __bpf_prog_ret0_warn when jit_requested == 1.
2. "bpf: Avoid __bpf_prog_ret0_warn when jit fails" returns an error when
   jit_requested == 1 but the JIT fails.

This change restores the interpreter fallback for BPF programs with a stack
size <= 512 bytes when the JIT fails.

Reported-by: Felix Fietkau
Closes: https://lore.kernel.org/bpf/2e267b4b-0540-45d8-9310-e127bf95fc63@nbd.name/
Fixes: 6ebc5030e0c5 ("bpf: Fix array bounds error with may_goto")
Fixes: 86bc9c742426 ("bpf: Avoid __bpf_prog_ret0_warn when jit fails")
Signed-off-by: KaFai Wan
---
 kernel/bpf/core.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 5d1650af899d..2d86bd4b0b97 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -2366,8 +2366,8 @@ static unsigned int __bpf_prog_ret0_warn(const void *ctx,
 					 const struct bpf_insn *insn)
 {
 	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
-	 * is not working properly, or interpreter is being used when
-	 * prog->jit_requested is not 0, so warn about it!
+	 * is not working properly, or may_goto caused a stack size > 512,
+	 * so warn about it!
 	 */
 	WARN_ON_ONCE(1);
 	return 0;
@@ -2478,10 +2478,10 @@ static void bpf_prog_select_func(struct bpf_prog *fp)
 	 * But for non-JITed programs, we don't need bpf_func, so no bounds
 	 * check needed.
 	 */
-	if (!fp->jit_requested &&
-	    !WARN_ON_ONCE(idx >= ARRAY_SIZE(interpreters))) {
+	if (idx < ARRAY_SIZE(interpreters)) {
 		fp->bpf_func = interpreters[idx];
 	} else {
+		WARN_ON_ONCE(!fp->jit_requested);
 		fp->bpf_func = __bpf_prog_ret0_warn;
 	}
 #else
@@ -2505,7 +2505,7 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
 	/* In case of BPF to BPF calls, verifier did all the prep
 	 * work with regards to JITing, etc.
 	 */
-	bool jit_needed = fp->jit_requested;
+	bool jit_needed = false;
 
 	if (fp->bpf_func)
 		goto finalize;
@@ -2515,6 +2515,8 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
 		jit_needed = true;
 
 	bpf_prog_select_func(fp);
+	if (fp->bpf_func == __bpf_prog_ret0_warn)
+		jit_needed = true;
 
 	/* eBPF JITs can rewrite the program in case constant
 	 * blinding is active. However, in case of error during
-- 
2.43.0
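
Testing note (not part of the patch): a minimal userspace sketch of the path
tcpdump exercises. Attaching a classic BPF filter with SO_ATTACH_FILTER makes
the kernel migrate it to eBPF and run bpf_prog_select_runtime(); on the
affected config the setsockopt() is where the failure surfaced. The one-insn
"accept all" filter below is only a placeholder for the long wlan filter from
the report and will not by itself force a JIT failure; it needs CAP_NET_RAW.

/* repro-sketch.c: attach a trivial cBPF filter to a packet socket. */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/filter.h>
#include <linux/if_ether.h>

int main(void)
{
	struct sock_filter insns[] = {
		{ 0x06, 0, 0, 0x0000ffff },	/* BPF_RET | BPF_K: accept */
	};
	struct sock_fprog prog = {
		.len = sizeof(insns) / sizeof(insns[0]),
		.filter = insns,
	};
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog)))
		perror("SO_ATTACH_FILTER");	/* the in-kernel attach path
						 * that failed in the report */
	close(fd);
	return 0;
}

The "No error information" text in the report appears to be what musl's
strerror() prints for the unfamiliar errno returned by the failed in-kernel
JIT path, which libpcap then passes through in its "Kernel filter failed"
warning.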