Introduce the definition of struct bpf_term_aux_states required to support fast-path termination of BPF programs. Add the memory allocation and free logic for the newly added term_states field in struct bpf_prog. Signed-off-by: Raj Sahu Signed-off-by: Siddharth Chintamaneni --- include/linux/bpf.h | 75 +++++++++++++++++++++++++++++---------------- kernel/bpf/core.c | 31 +++++++++++++++++++ 2 files changed, 79 insertions(+), 27 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 8f6e87f0f3a8..caaee33744fc 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1584,6 +1584,25 @@ struct bpf_stream_stage { int len; }; +struct call_aux_states { + int call_bpf_insn_idx; + int jit_call_idx; + u8 is_helper_kfunc; + u8 is_bpf_loop; + u8 is_bpf_loop_cb_inline; +}; + +struct bpf_term_patch_call_sites { + u32 call_sites_cnt; + struct call_aux_states *call_states; +}; + +struct bpf_term_aux_states { + struct bpf_prog *prog; + struct work_struct work; + struct bpf_term_patch_call_sites *patch_call_sites; +}; + struct bpf_prog_aux { atomic64_t refcnt; u32 used_map_cnt; @@ -1618,6 +1637,7 @@ struct bpf_prog_aux { bool tail_call_reachable; bool xdp_has_frags; bool exception_cb; + bool is_bpf_loop_cb_non_inline; bool exception_boundary; bool is_extended; /* true if extended by freplace program */ bool jits_use_priv_stack; @@ -1696,33 +1716,34 @@ struct bpf_prog_aux { }; struct bpf_prog { - u16 pages; /* Number of allocated pages */ - u16 jited:1, /* Is our filter JIT'ed? */ - jit_requested:1,/* archs need to JIT the prog */ - gpl_compatible:1, /* Is filter GPL compatible? */ - cb_access:1, /* Is control block accessed? */ - dst_needed:1, /* Do we need dst entry? */ - blinding_requested:1, /* needs constant blinding */ - blinded:1, /* Was blinded */ - is_func:1, /* program is a bpf function */ - kprobe_override:1, /* Do we override a kprobe? */ - has_callchain_buf:1, /* callchain buffer allocated? 
*/ - enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */ - call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */ - call_get_func_ip:1, /* Do we call get_func_ip() */ - tstamp_type_access:1, /* Accessed __sk_buff->tstamp_type */ - sleepable:1; /* BPF program is sleepable */ - enum bpf_prog_type type; /* Type of BPF program */ - enum bpf_attach_type expected_attach_type; /* For some prog types */ - u32 len; /* Number of filter blocks */ - u32 jited_len; /* Size of jited insns in bytes */ - u8 tag[BPF_TAG_SIZE]; - struct bpf_prog_stats __percpu *stats; - int __percpu *active; - unsigned int (*bpf_func)(const void *ctx, - const struct bpf_insn *insn); - struct bpf_prog_aux *aux; /* Auxiliary fields */ - struct sock_fprog_kern *orig_prog; /* Original BPF program */ + u16 pages; /* Number of allocated pages */ + u16 jited:1, /* Is our filter JIT'ed? */ + jit_requested:1,/* archs need to JIT the prog */ + gpl_compatible:1, /* Is filter GPL compatible? */ + cb_access:1, /* Is control block accessed? */ + dst_needed:1, /* Do we need dst entry? */ + blinding_requested:1, /* needs constant blinding */ + blinded:1, /* Was blinded */ + is_func:1, /* program is a bpf function */ + kprobe_override:1, /* Do we override a kprobe? */ + has_callchain_buf:1, /* callchain buffer allocated? 
*/ + enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */ + call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */ + call_get_func_ip:1, /* Do we call get_func_ip() */ + tstamp_type_access:1, /* Accessed __sk_buff->tstamp_type */ + sleepable:1; /* BPF program is sleepable */ + enum bpf_prog_type type; /* Type of BPF program */ + enum bpf_attach_type expected_attach_type; /* For some prog types */ + u32 len; /* Number of filter blocks */ + u32 jited_len; /* Size of jited insns in bytes */ + u8 tag[BPF_TAG_SIZE]; + struct bpf_prog_stats __percpu *stats; + int __percpu *active; + unsigned int (*bpf_func)(const void *ctx, + const struct bpf_insn *insn); + struct bpf_prog_aux *aux; /* Auxiliary fields */ + struct sock_fprog_kern *orig_prog; /* Original BPF program */ + struct bpf_term_aux_states *term_states; /* Instructions for interpreter */ union { DECLARE_FLEX_ARRAY(struct sock_filter, insns); diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index ef01cc644a96..740b5a3a6b55 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -100,6 +100,8 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags); struct bpf_prog_aux *aux; struct bpf_prog *fp; + struct bpf_term_aux_states *term_states = NULL; + struct bpf_term_patch_call_sites *patch_call_sites = NULL; size = round_up(size, __PAGE_SIZE); fp = __vmalloc(size, gfp_flags); @@ -118,11 +120,24 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag return NULL; } + term_states = kzalloc(sizeof(*term_states), bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags)); + if (!term_states) + goto free_alloc_percpu; + + patch_call_sites = kzalloc(sizeof(*patch_call_sites), bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags)); + if (!patch_call_sites) + goto free_bpf_term_states; + fp->pages = size / PAGE_SIZE; fp->aux = aux; fp->aux->prog = fp; 
fp->jit_requested = ebpf_jit_enabled(); fp->blinding_requested = bpf_jit_blinding_enabled(fp); + fp->term_states = term_states; + fp->term_states->patch_call_sites = patch_call_sites; + fp->term_states->patch_call_sites->call_sites_cnt = 0; + fp->term_states->prog = fp; + #ifdef CONFIG_CGROUP_BPF aux->cgroup_atype = CGROUP_BPF_ATTACH_TYPE_INVALID; #endif @@ -140,6 +155,15 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag #endif return fp; + +free_bpf_term_states: + kfree(term_states); +free_alloc_percpu: + free_percpu(fp->active); + kfree(aux); + vfree(fp); + + return NULL; } struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags) @@ -266,6 +290,7 @@ struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size, memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE); fp->pages = pages; fp->aux->prog = fp; + fp->term_states->prog = fp; /* We keep fp->aux from fp_old around in the new * reallocated structure. @@ -273,6 +298,7 @@ struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size, fp_old->aux = NULL; fp_old->stats = NULL; fp_old->active = NULL; + fp_old->term_states = NULL; __bpf_prog_free(fp_old); } @@ -287,6 +313,11 @@ void __bpf_prog_free(struct bpf_prog *fp) kfree(fp->aux->poke_tab); kfree(fp->aux); } + if (fp->term_states) { + if (fp->term_states->patch_call_sites) + kfree(fp->term_states->patch_call_sites); + kfree(fp->term_states); + } free_percpu(fp->stats); free_percpu(fp->active); vfree(fp); -- 2.43.0