From: pengdonglin

Implement lazy validation of BTF type ordering to enable efficient
binary search for sorted BTF while maintaining linear search fallback
for unsorted cases.

Cc: Eduard Zingerman
Cc: Alexei Starovoitov
Cc: Andrii Nakryiko
Cc: Alan Maguire
Cc: Song Liu
Signed-off-by: pengdonglin
Signed-off-by: Donglin Peng
---
 kernel/bpf/btf.c | 66 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 66 insertions(+)

diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index da35d8636b9b..c76d77fd30a7 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -192,6 +192,8 @@
  */
 #define BTF_MAX_SIZE (16 * 1024 * 1024)
 
+#define BTF_NEED_SORT_CHECK ((u32)-1)
+
 #define for_each_member_from(i, from, struct_type, member)		\
 	for (i = from, member = btf_type_member(struct_type) + from;	\
 	     i < btf_type_vlen(struct_type);				\
@@ -550,6 +552,65 @@ u32 btf_nr_types(const struct btf *btf)
 	return total;
 }
 
+static int btf_compare_type_names(const void *a, const void *b, void *priv)
+{
+	struct btf *btf = (struct btf *)priv;
+	const struct btf_type *ta = btf_type_by_id(btf, *(__u32 *)a);
+	const struct btf_type *tb = btf_type_by_id(btf, *(__u32 *)b);
+	const char *na, *nb;
+
+	if (!ta->name_off && tb->name_off)
+		return 1;
+	if (ta->name_off && !tb->name_off)
+		return -1;
+	if (!ta->name_off && !tb->name_off)
+		return 0;
+
+	na = btf_name_by_offset(btf, ta->name_off);
+	nb = btf_name_by_offset(btf, tb->name_off);
+	return strcmp(na, nb);
+}
+
+/* Verifies BTF type ordering by name and counts named types.
+ *
+ * Checks that types are sorted in ascending order with named types
+ * before anonymous ones. If verified, sets nr_sorted_types to the
+ * number of named types.
+ */
+static void btf_check_sorted(struct btf *btf, int start_id)
+{
+	const struct btf_type *t;
+	int i, n, nr_sorted_types;
+
+	if (likely(btf->nr_sorted_types != BTF_NEED_SORT_CHECK))
+		return;
+
+	btf->nr_sorted_types = 0;
+
+	if (btf->nr_types < 2)
+		return;
+
+	nr_sorted_types = 0;
+	n = btf_nr_types(btf);
+	for (n--, i = start_id; i < n; i++) {
+		int k = i + 1;
+
+		if (btf_compare_type_names(&i, &k, btf) > 0)
+			return;
+
+		t = btf_type_by_id(btf, k);
+		if (t->name_off)
+			nr_sorted_types++;
+	}
+
+	t = btf_type_by_id(btf, start_id);
+	if (t->name_off)
+		nr_sorted_types++;
+
+	if (nr_sorted_types)
+		btf->nr_sorted_types = nr_sorted_types;
+}
+
 /* Find BTF types with matching names within the [left, right] index range.
  * On success, updates *left and *right to the boundaries of the matching range
  * and returns the leftmost matching index.
@@ -617,6 +678,8 @@ s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind)
 		err = btf_find_by_name_kind(base_btf, name, kind);
 
 	if (err == -ENOENT) {
+		btf_check_sorted((struct btf *)btf, btf_start_id(btf));
+
 		if (btf->nr_sorted_types) {
 			/* binary search */
 			s32 l, r;
@@ -5882,6 +5945,7 @@ static struct btf *btf_parse(const union bpf_attr *attr, bpfptr_t uattr, u32 uat
 		goto errout;
 	}
 	env->btf = btf;
+	btf->nr_sorted_types = BTF_NEED_SORT_CHECK;
 
 	data = kvmalloc(attr->btf_size, GFP_KERNEL | __GFP_NOWARN);
 	if (!data) {
@@ -6301,6 +6365,7 @@ static struct btf *btf_parse_base(struct btf_verifier_env *env, const char *name
 	btf->data = data;
 	btf->data_size = data_size;
 	btf->kernel_btf = true;
+	btf->nr_sorted_types = BTF_NEED_SORT_CHECK;
 	snprintf(btf->name, sizeof(btf->name), "%s", name);
 
 	err = btf_parse_hdr(env);
@@ -6418,6 +6483,7 @@ static struct btf *btf_parse_module(const char *module_name, const void *data,
 	btf->start_id = base_btf->nr_types;
 	btf->start_str_off = base_btf->hdr.str_len;
 	btf->kernel_btf = true;
+	btf->nr_sorted_types = BTF_NEED_SORT_CHECK;
 	snprintf(btf->name, sizeof(btf->name), "%s", module_name);
 
 	btf->data = kvmemdup(data, data_size, GFP_KERNEL | __GFP_NOWARN);
-- 
2.34.1