From: Donglin Peng Refactor btf_dedup_remap_types() by extracting its core logic into a new btf_remap_types() helper function. This eliminates code duplication and improves modularity while maintaining the same functionality. The new function encapsulates iteration over BTF types and BTF ext sections, accepting a callback for flexible type ID remapping. This makes the type remapping logic more maintainable and reusable. Cc: Eduard Zingerman Cc: Alexei Starovoitov Cc: Andrii Nakryiko Cc: Alan Maguire Cc: Song Liu Cc: Xiaoqin Zhang Signed-off-by: Donglin Peng Signed-off-by: Donglin Peng Reviewed-by: Eduard Zingerman --- tools/lib/bpf/btf.c | 63 +++++++++++++++++++++++---------------------- 1 file changed, 32 insertions(+), 31 deletions(-) diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 18907f0fcf9f..0c1dab8a199a 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -3400,6 +3400,37 @@ int btf_ext__set_endianness(struct btf_ext *btf_ext, enum btf_endianness endian) return 0; } +static int btf_remap_types(struct btf *btf, struct btf_ext *btf_ext, + type_id_visit_fn visit, void *ctx) +{ + int i, r; + + for (i = 0; i < btf->nr_types; i++) { + struct btf_type *t = btf_type_by_id(btf, btf->start_id + i); + struct btf_field_iter it; + __u32 *type_id; + + r = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS); + if (r) + return r; + + while ((type_id = btf_field_iter_next(&it))) { + r = visit(type_id, ctx); + if (r) + return r; + } + } + + if (!btf_ext) + return 0; + + r = btf_ext_visit_type_ids(btf_ext, visit, ctx); + if (r) + return r; + + return 0; +} + struct btf_dedup; static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts); @@ -5320,37 +5351,7 @@ static int btf_dedup_remap_type_id(__u32 *type_id, void *ctx) */ static int btf_dedup_remap_types(struct btf_dedup *d) { - int i, r; - - for (i = 0; i < d->btf->nr_types; i++) { - struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i); - struct btf_field_iter it; 
- __u32 *type_id; - - r = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS); - if (r) - return r; - - while ((type_id = btf_field_iter_next(&it))) { - __u32 resolved_id, new_id; - - resolved_id = resolve_type_id(d, *type_id); - new_id = d->hypot_map[resolved_id]; - if (new_id > BTF_MAX_NR_TYPES) - return -EINVAL; - - *type_id = new_id; - } - } - - if (!d->btf_ext) - return 0; - - r = btf_ext_visit_type_ids(d->btf_ext, btf_dedup_remap_type_id, d); - if (r) - return r; - - return 0; + return btf_remap_types(d->btf, d->btf_ext, btf_dedup_remap_type_id, d); } /* -- 2.34.1 From: Donglin Peng Introduce btf__permute() API to allow in-place rearrangement of BTF types. This function reorganizes BTF type order according to a provided array of type IDs, updating all type references to maintain consistency. The permutation process involves: 1. Shuffling types into new order based on the provided IDs array 2. Remapping all type ID references to point to new locations 3. Handling BTF extension data if provided via options This is particularly useful for optimizing type locality after BTF deduplication or for meeting specific layout requirements in specialized use cases. 
Cc: Eduard Zingerman Cc: Alexei Starovoitov Cc: Andrii Nakryiko Cc: Alan Maguire Cc: Song Liu Cc: Xiaoqin Zhang Signed-off-by: Donglin Peng Signed-off-by: Donglin Peng Acked-by: Eduard Zingerman --- tools/lib/bpf/btf.c | 176 +++++++++++++++++++++++++++++++++++++++ tools/lib/bpf/btf.h | 31 +++++++ tools/lib/bpf/libbpf.map | 1 + 3 files changed, 208 insertions(+) diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 0c1dab8a199a..aad630822584 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -5830,3 +5830,179 @@ int btf__relocate(struct btf *btf, const struct btf *base_btf) btf->owns_base = false; return libbpf_err(err); } + +struct btf_permute { + /* .BTF section to be permuted in-place */ + struct btf *btf; + /* optional .BTF.ext info along the main BTF info */ + struct btf_ext *btf_ext; + /* Array containing original type IDs (exclude VOID type ID 0) + * in user-defined order + */ + __u32 *ids; + /* Array used to map from original type ID to a new permuted type + * ID + */ + __u32 *ids_map; + /* Number of elements in ids array */ + __u32 ids_sz; +}; + +static int btf_permute_shuffle_types(struct btf_permute *p); +static int btf_permute_remap_types(struct btf_permute *p); +static int btf_permute_remap_type_id(__u32 *type_id, void *ctx); + +int btf__permute(struct btf *btf, __u32 *ids, __u32 ids_sz, const struct btf_permute_opts *opts) +{ + struct btf_permute p; + int err = 0; + __u32 *ids_map = NULL; + + if (!OPTS_VALID(opts, btf_permute_opts) || (ids_sz > btf->nr_types)) + return libbpf_err(-EINVAL); + + ids_map = calloc(ids_sz, sizeof(*ids_map)); + if (!ids_map) { + err = -ENOMEM; + goto done; + } + + p.btf = btf; + p.btf_ext = OPTS_GET(opts, btf_ext, NULL); + p.ids = ids; + p.ids_map = ids_map; + p.ids_sz = ids_sz; + + if (btf_ensure_modifiable(btf)) { + err = -ENOMEM; + goto done; + } + err = btf_permute_shuffle_types(&p); + if (err < 0) { + goto done; + } + err = btf_permute_remap_types(&p); + if (err < 0) { + goto done; + } + +done: + 
free(ids_map); + return libbpf_err(err); +} + +/* Shuffle BTF types. + * + * Rearranges types according to the order specified in p->ids array. + * The p->ids_map array stores the mapping from original type IDs to + * new shuffled IDs, which is used in the next phase to update type + * references. + */ +static int btf_permute_shuffle_types(struct btf_permute *p) +{ + struct btf *btf = p->btf; + const struct btf_type *t; + __u32 *new_offs = NULL, *ids_map; + void *nt, *new_types = NULL; + int i, id, len, err; + + new_offs = calloc(p->ids_sz, sizeof(*new_offs)); + new_types = calloc(btf->hdr->type_len, 1); + if (!new_offs || !new_types) { + err = -ENOMEM; + goto out_err; + } + + nt = new_types; + for (i = 0; i < p->ids_sz; i++) { + id = p->ids[i]; + /* type IDs from base_btf and the VOID type are not allowed */ + if (id < btf->start_id) { + err = -EINVAL; + goto out_err; + } + /* must be a valid type ID */ + t = btf__type_by_id(btf, id); + if (!t) { + err = -EINVAL; + goto out_err; + } + ids_map = &p->ids_map[id - btf->start_id]; + /* duplicate type IDs are not allowed */ + if (*ids_map) { + err = -EINVAL; + goto out_err; + } + len = btf_type_size(t); + memcpy(nt, t, len); + new_offs[i] = nt - new_types; + *ids_map = btf->start_id + i; + nt += len; + } + + /* resize */ + if (p->ids_sz < btf->nr_types) { + __u32 type_len = nt - new_types; + void *tmp_types; + + tmp_types = realloc(new_types, type_len); + if (!tmp_types) { + err = -ENOMEM; + goto out_err; + } + new_types = tmp_types; + btf->nr_types = p->ids_sz; + btf->type_offs_cap = p->ids_sz; + btf->types_data_cap = type_len; + btf->hdr->type_len = type_len; + btf->hdr->str_off = type_len; + btf->raw_size = btf->hdr->hdr_len + btf->hdr->type_len + btf->hdr->str_len; + } + free(btf->types_data); + free(btf->type_offs); + btf->types_data = new_types; + btf->type_offs = new_offs; + return 0; + +out_err: + free(new_offs); + free(new_types); + return err; +} + +/* Callback function to remap individual type ID references 
+ * + * This callback is invoked by btf_remap_types() for each type ID reference + * found in the BTF data. It updates the reference to point to the new + * permuted type ID using ids_map array. + */ +static int btf_permute_remap_type_id(__u32 *type_id, void *ctx) +{ + struct btf_permute *p = ctx; + __u32 new_type_id = *type_id; + + /* skip references that point into the base BTF */ + if (new_type_id < p->btf->start_id) + return 0; + + new_type_id = p->ids_map[*type_id - p->btf->start_id]; + if (new_type_id > BTF_MAX_NR_TYPES) + return -EINVAL; + + *type_id = new_type_id; + return 0; +} + +/* Remap referenced type IDs into permuted type IDs. + * + * After BTF types are permuted, their final type IDs may differ from original + * ones. The map from original to a corresponding permuted type ID is stored + * in p->ids_map array and is populated during shuffle phase. During remapping + * phase we are rewriting all type IDs referenced from any BTF type (e.g., + * struct fields, func proto args, etc) to their final permuted type IDs. + */ +static int btf_permute_remap_types(struct btf_permute *p) +{ + return btf_remap_types(p->btf, p->btf_ext, btf_permute_remap_type_id, p); +} + diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index ccfd905f03df..a0bf9b011c94 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -273,6 +273,37 @@ LIBBPF_API int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts); */ LIBBPF_API int btf__relocate(struct btf *btf, const struct btf *base_btf); +struct btf_permute_opts { + size_t sz; + /* optional .BTF.ext info along the main BTF info */ + struct btf_ext *btf_ext; + size_t :0; +}; +#define btf_permute_opts__last_field btf_ext + +/** + * @brief **btf__permute()** rearranges BTF types in-place according to specified mapping + * @param btf BTF object to permute + * @param ids Array containing original type IDs (exclude VOID type ID 0) in user-defined order. 
+ * @param ids_sz Number of elements in @ids array + * @param opts Optional parameters for BTF extension data reference updates + * @return 0 on success, negative error code on failure + * + * **btf__permute()** performs an in-place permutation of BTF types, rearranging them according + * to the order specified in @ids array. If @ids_sz is smaller than the total number of types + * in @btf, the BTF will be truncated to contain only the types specified in @ids. After + * reordering, all type references within the BTF data and optional BTF extension are updated + * to maintain consistency. Subsequent btf__dedup may be required if the BTF is truncated during + * permutation. + * + * On error, negative error code is returned and errno is set appropriately. + * Common error codes include: + * - -EINVAL: Invalid parameters or invalid ID mapping (e.g., duplicate IDs, out-of-range IDs) + * - -ENOMEM: Memory allocation failure during permutation process + */ +LIBBPF_API int btf__permute(struct btf *btf, __u32 *ids, __u32 ids_sz, + const struct btf_permute_opts *opts); + struct btf_dump; struct btf_dump_opts { diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 8ed8749907d4..b778e5a5d0a8 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -451,4 +451,5 @@ LIBBPF_1.7.0 { global: bpf_map__set_exclusive_program; bpf_map__exclusive_program; + btf__permute; } LIBBPF_1.6.0; -- 2.34.1 From: Donglin Peng This patch introduces binary search optimization for BTF type lookups when the BTF instance contains sorted types. The optimization significantly improves performance when searching for types in large BTF instances with sorted type names. For unsorted BTF or when nr_sorted_types is zero, the implementation falls back to the original linear search algorithm. 
Cc: Eduard Zingerman Cc: Alexei Starovoitov Cc: Andrii Nakryiko Cc: Alan Maguire Cc: Song Liu Cc: Xiaoqin Zhang Signed-off-by: Donglin Peng Signed-off-by: Donglin Peng --- tools/lib/bpf/btf.c | 145 +++++++++++++++++++++++++++++++++++++------- 1 file changed, 122 insertions(+), 23 deletions(-) diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index aad630822584..be092892c4ae 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -26,6 +26,10 @@ #define BTF_MAX_NR_TYPES 0x7fffffffU #define BTF_MAX_STR_OFFSET 0x7fffffffU +/* + * sort verification occurs lazily upon first btf_find_type_by_name_kind() call + */ +#define BTF_NEED_SORT_CHECK ((__u32)-1) static struct btf_type btf_void; @@ -92,6 +96,16 @@ struct btf { * - for split BTF counts number of types added on top of base BTF. */ __u32 nr_types; + /* number of sorted and named types in this BTF instance: + * - doesn't include special [0] void type; + * - for split BTF counts number of sorted and named types added on + * top of base BTF. + * - BTF_NEED_SORT_CHECK value indicates sort validation will be performed + * on first call to btf_find_type_by_name_kind. + * - zero value indicates applied sorting check with unsorted BTF or no + * named types. + */ + __u32 nr_sorted_types; /* if not NULL, points to the base BTF on top of which the current * split BTF is based */ @@ -897,44 +911,126 @@ int btf__resolve_type(const struct btf *btf, __u32 type_id) return type_id; } -__s32 btf__find_by_name(const struct btf *btf, const char *type_name) +/* Performs binary search within specified type ID range to find the leftmost + * BTF type matching the given name. The search assumes types are sorted by + * name in lexicographical order within the specified range. 
+ * + * Return: Type ID of leftmost matching type, or -ENOENT if not found + */ +static __s32 btf_find_type_by_name_bsearch(const struct btf *btf, const char *name, + __s32 start_id, __s32 end_id) { - __u32 i, nr_types = btf__type_cnt(btf); + const struct btf_type *t; + const char *tname; + __s32 l, r, m, lmost = -ENOENT; + int ret; + + l = start_id; + r = end_id; + while (l <= r) { + m = l + (r - l) / 2; + t = btf_type_by_id(btf, m); + tname = btf__str_by_offset(btf, t->name_off); + ret = strcmp(tname, name); + if (ret < 0) { + l = m + 1; + } else { + if (ret == 0) + lmost = m; + r = m - 1; + } + } - if (!strcmp(type_name, "void")) - return 0; + return lmost; +} - for (i = 1; i < nr_types; i++) { - const struct btf_type *t = btf__type_by_id(btf, i); - const char *name = btf__name_by_offset(btf, t->name_off); +/* Searches for a BTF type by name and optionally by kind. The function first + * checks if the search should start from the base BTF (if @start_id is before + * current BTF's start_id). If types are sorted, it uses binary search to find + * the leftmost matching type and then verifies the kind. For unsorted types, + * it falls back to linear search through all types. + * + * The function handles split BTF scenarios by recursively searching in base + * BTFs when necessary. When @kind is -1, only the name matching is performed. 
+ * + * Return: Type ID of matching type on success, -ENOENT if not found + */ +static __s32 btf_find_type_by_name_kind(const struct btf *btf, int start_id, + const char *type_name, __u32 kind) +{ + const struct btf_type *t; + const char *tname; + int err = -ENOENT; + + if (start_id < btf->start_id) { + err = btf_find_type_by_name_kind(btf->base_btf, start_id, + type_name, kind); + if (err > 0) + goto out; + start_id = btf->start_id; + } + + if (btf->nr_sorted_types != BTF_NEED_SORT_CHECK) { + /* binary search */ + __s32 end_id; + bool skip_first; + int ret; + + end_id = btf->start_id + btf->nr_sorted_types - 1; + ret = btf_find_type_by_name_bsearch(btf, type_name, start_id, end_id); + if (ret < 0) + goto out; + if (kind == -1) + return ret; + skip_first = true; + do { + t = btf_type_by_id(btf, ret); + if (BTF_INFO_KIND(t->info) != kind) { + if (skip_first) { + skip_first = false; + continue; + } + } else if (skip_first) { + return ret; + } + tname = btf__str_by_offset(btf, t->name_off); + if (!strcmp(tname, type_name)) + return ret; + else + break; + } while (++ret <= end_id); + } else { + /* linear search */ + __u32 i, total; - if (name && !strcmp(type_name, name)) - return i; + total = btf__type_cnt(btf); + for (i = start_id; i < total; i++) { + t = btf_type_by_id(btf, i); + if (kind != -1 && btf_kind(t) != kind) + continue; + tname = btf__str_by_offset(btf, t->name_off); + if (tname && !strcmp(tname, type_name)) + return i; + } } - return libbpf_err(-ENOENT); +out: + return err; } static __s32 btf_find_by_name_kind(const struct btf *btf, int start_id, const char *type_name, __u32 kind) { - __u32 i, nr_types = btf__type_cnt(btf); - if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void")) return 0; - for (i = start_id; i < nr_types; i++) { - const struct btf_type *t = btf__type_by_id(btf, i); - const char *name; - - if (btf_kind(t) != kind) - continue; - name = btf__name_by_offset(btf, t->name_off); - if (name && !strcmp(type_name, name)) - return i; - } + return 
libbpf_err(btf_find_type_by_name_kind(btf, start_id, type_name, kind)); +} - return libbpf_err(-ENOENT); +/* the kind value of -1 indicates that kind matching should be skipped */ +__s32 btf__find_by_name(const struct btf *btf, const char *type_name) +{ + return btf_find_by_name_kind(btf, btf->start_id, type_name, -1); } __s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name, @@ -1006,6 +1102,7 @@ static struct btf *btf_new_empty(struct btf *base_btf) btf->fd = -1; btf->ptr_sz = sizeof(void *); btf->swapped_endian = false; + btf->nr_sorted_types = BTF_NEED_SORT_CHECK; if (base_btf) { btf->base_btf = base_btf; @@ -1057,6 +1154,7 @@ static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf, b btf->start_id = 1; btf->start_str_off = 0; btf->fd = -1; + btf->nr_sorted_types = BTF_NEED_SORT_CHECK; if (base_btf) { btf->base_btf = base_btf; @@ -1715,6 +1813,7 @@ static void btf_invalidate_raw_data(struct btf *btf) free(btf->raw_data_swapped); btf->raw_data_swapped = NULL; } + btf->nr_sorted_types = BTF_NEED_SORT_CHECK; } /* Ensure BTF is ready to be modified (by splitting into a three memory -- 2.34.1 From: Donglin Peng This patch adds lazy validation of BTF type ordering to determine if types are sorted by name. The check is performed on first access and cached, enabling efficient binary search for sorted BTF while maintaining linear search fallback for unsorted cases. 
Cc: Eduard Zingerman Cc: Alexei Starovoitov Cc: Andrii Nakryiko Cc: Alan Maguire Cc: Song Liu Cc: Xiaoqin Zhang Signed-off-by: Donglin Peng Signed-off-by: Donglin Peng --- tools/lib/bpf/btf.c | 69 ++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 68 insertions(+), 1 deletion(-) diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index be092892c4ae..56e555c43712 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -911,6 +911,73 @@ int btf__resolve_type(const struct btf *btf, __u32 type_id) return type_id; } +/* Anonymous types (with empty names) are considered greater than named types + * and are sorted after them. Two anonymous types are considered equal. Named + * types are compared lexicographically. + */ +static int btf_compare_type_names(const void *a, const void *b, void *priv) +{ + struct btf *btf = (struct btf *)priv; + struct btf_type *ta = btf_type_by_id(btf, *(__u32 *)a); + struct btf_type *tb = btf_type_by_id(btf, *(__u32 *)b); + const char *na, *nb; + bool anon_a, anon_b; + + na = btf__str_by_offset(btf, ta->name_off); + nb = btf__str_by_offset(btf, tb->name_off); + anon_a = str_is_empty(na); + anon_b = str_is_empty(nb); + + if (anon_a && !anon_b) + return 1; + if (!anon_a && anon_b) + return -1; + if (anon_a && anon_b) + return 0; + + return strcmp(na, nb); +} + +/* Verifies that BTF types are sorted in ascending order according to their + * names, with named types appearing before anonymous types. If the ordering + * is correct, counts the number of named types and updates the BTF object's + * nr_sorted_types field. 
+ * + * Return: true if types are properly sorted, false otherwise + */ +static bool btf_check_sorted(struct btf *btf) +{ + const struct btf_type *t; + int i, k = 0, n, nr_sorted_types; + + if (likely(btf->nr_sorted_types != BTF_NEED_SORT_CHECK)) + goto out; + btf->nr_sorted_types = 0; + + if (btf->nr_types < 2) + goto out; + + nr_sorted_types = 0; + n = btf__type_cnt(btf) - 1; + for (i = btf->start_id; i < n; i++) { + k = i + 1; + if (btf_compare_type_names(&i, &k, btf) > 0) + goto out; + t = btf_type_by_id(btf, i); + if (!str_is_empty(btf__str_by_offset(btf, t->name_off))) + nr_sorted_types++; + } + + t = btf_type_by_id(btf, k); + if (!str_is_empty(btf__str_by_offset(btf, t->name_off))) + nr_sorted_types++; + if (nr_sorted_types) + btf->nr_sorted_types = nr_sorted_types; + +out: + return btf->nr_sorted_types > 0; +} + /* Performs binary search within specified type ID range to find the leftmost * BTF type matching the given name. The search assumes types are sorted by * name in lexicographical order within the specified range. @@ -970,7 +1037,7 @@ static __s32 btf_find_type_by_name_kind(const struct btf *btf, int start_id, start_id = btf->start_id; } - if (btf->nr_sorted_types != BTF_NEED_SORT_CHECK) { + if (btf_check_sorted((struct btf *)btf)) { /* binary search */ __s32 end_id; bool skip_first; -- 2.34.1 From: Donglin Peng Improve btf_find_by_name_kind() performance by adding binary search support for sorted types. Falls back to linear search for compatibility. 
Cc: Eduard Zingerman Cc: Alexei Starovoitov Cc: Andrii Nakryiko Cc: Alan Maguire Cc: Song Liu Cc: Xiaoqin Zhang Signed-off-by: Donglin Peng Signed-off-by: Donglin Peng --- kernel/bpf/btf.c | 117 +++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 107 insertions(+), 10 deletions(-) diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 0de8fc8a0e0b..66cb739a0598 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -192,6 +192,8 @@ */ #define BTF_MAX_SIZE (16 * 1024 * 1024) +#define BTF_NEED_SORT_CHECK ((u32)-1) + #define for_each_member_from(i, from, struct_type, member) \ for (i = from, member = btf_type_member(struct_type) + from; \ i < btf_type_vlen(struct_type); \ @@ -259,6 +261,7 @@ struct btf { void *nohdr_data; struct btf_header hdr; u32 nr_types; /* includes VOID for base BTF */ + u32 nr_sorted_types; /* exclude VOID for base BTF */ u32 types_size; u32 data_size; refcount_t refcnt; @@ -494,6 +497,11 @@ static bool btf_type_is_modifier(const struct btf_type *t) return false; } +static int btf_start_id(const struct btf *btf) +{ + return btf->start_id + (btf->base_btf ? 0 : 1); +} + bool btf_type_is_void(const struct btf_type *t) { return t == &btf_void; @@ -544,24 +552,110 @@ u32 btf_nr_types(const struct btf *btf) return total; } +/* Performs binary search within specified type ID range to find the leftmost + * BTF type matching the given name. The search assumes types are sorted by + * name in lexicographical order within the specified range. 
+ * + * Return: Type ID of leftmost matching type, or -ENOENT if not found + */ +static s32 btf_find_by_name_kind_bsearch(const struct btf *btf, const char *name, + s32 start_id, s32 end_id) +{ + const struct btf_type *t; + const char *tname; + s32 l, r, m, lmost = -ENOENT; + int ret; + + l = start_id; + r = end_id; + while (l <= r) { + m = l + (r - l) / 2; + t = btf_type_by_id(btf, m); + tname = btf_name_by_offset(btf, t->name_off); + ret = strcmp(tname, name); + if (ret < 0) { + l = m + 1; + } else { + if (ret == 0) + lmost = m; + r = m - 1; + } + } + + return lmost; +} + +/* Searches for a BTF type with the specified name and kind. The function + * first recursively searches in the base BTF (if present), then searches + * in the current BTF using either binary search (if types are sorted) + * or linear search. + * + * Binary search is used when types are name-sorted (nr_sorted_types > 0). + * After finding a name match, it scans forward to find the first type + * that also matches the specified kind. Linear search is used for unsorted + * types, checking each type sequentially. 
+ * + * Return: Type ID of matching type on success, -ENOENT if not found + */ s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind) { + const struct btf *base_btf = btf_base_btf(btf); const struct btf_type *t; const char *tname; - u32 i, total; + int err = -ENOENT; - total = btf_nr_types(btf); - for (i = 1; i < total; i++) { - t = btf_type_by_id(btf, i); - if (BTF_INFO_KIND(t->info) != kind) - continue; + if (base_btf) { + err = btf_find_by_name_kind(base_btf, name, kind); + if (err > 0) + goto out; + } - tname = btf_name_by_offset(btf, t->name_off); - if (!strcmp(tname, name)) - return i; + if (btf->nr_sorted_types != BTF_NEED_SORT_CHECK) { + /* binary search */ + bool skip_first; + s32 start_id, end_id; + int ret; + + start_id = btf_start_id(btf); + end_id = start_id + btf->nr_sorted_types - 1; + ret = btf_find_by_name_kind_bsearch(btf, name, start_id, end_id); + if (ret < 0) + goto out; + skip_first = true; + do { + t = btf_type_by_id(btf, ret); + if (BTF_INFO_KIND(t->info) != kind) { + if (skip_first) { + skip_first = false; + continue; + } + } else if (skip_first) { + return ret; + } + tname = btf_name_by_offset(btf, t->name_off); + if (!strcmp(tname, name)) + return ret; + else + break; + } while (++ret <= end_id); + } else { + /* linear search */ + u32 i, total; + + total = btf_nr_types(btf); + for (i = btf_start_id(btf); i < total; i++) { + t = btf_type_by_id(btf, i); + if (BTF_INFO_KIND(t->info) != kind) + continue; + + tname = btf_name_by_offset(btf, t->name_off); + if (!strcmp(tname, name)) + return i; + } } - return -ENOENT; +out: + return err; } s32 bpf_find_btf_id(const char *name, u32 kind, struct btf **btf_p) @@ -5791,6 +5885,7 @@ static struct btf *btf_parse(const union bpf_attr *attr, bpfptr_t uattr, u32 uat goto errout; } env->btf = btf; + btf->nr_sorted_types = BTF_NEED_SORT_CHECK; data = kvmalloc(attr->btf_size, GFP_KERNEL | __GFP_NOWARN); if (!data) { @@ -6210,6 +6305,7 @@ static struct btf *btf_parse_base(struct 
btf_verifier_env *env, const char *name btf->data = data; btf->data_size = data_size; btf->kernel_btf = true; + btf->nr_sorted_types = BTF_NEED_SORT_CHECK; snprintf(btf->name, sizeof(btf->name), "%s", name); err = btf_parse_hdr(env); @@ -6327,6 +6423,7 @@ static struct btf *btf_parse_module(const char *module_name, const void *data, btf->start_id = base_btf->nr_types; btf->start_str_off = base_btf->hdr.str_len; btf->kernel_btf = true; + btf->nr_sorted_types = BTF_NEED_SORT_CHECK; snprintf(btf->name, sizeof(btf->name), "%s", module_name); btf->data = kvmemdup(data, data_size, GFP_KERNEL | __GFP_NOWARN); -- 2.34.1 From: Donglin Peng Implement lazy validation of BTF type ordering to enable efficient binary search for sorted BTF while maintaining linear search fallback for unsorted cases. Cc: Eduard Zingerman Cc: Alexei Starovoitov Cc: Andrii Nakryiko Cc: Alan Maguire Cc: Song Liu Cc: Xiaoqin Zhang Signed-off-by: Donglin Peng Signed-off-by: Donglin Peng --- kernel/bpf/btf.c | 66 +++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 65 insertions(+), 1 deletion(-) diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 66cb739a0598..33c327d3cac3 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -552,6 +552,70 @@ u32 btf_nr_types(const struct btf *btf) return total; } +/* Anonymous types (with empty names) are considered greater than named types + * and are sorted after them. Two anonymous types are considered equal. Named + * types are compared lexicographically. 
+ */ +static int btf_compare_type_names(const void *a, const void *b, void *priv) +{ + struct btf *btf = (struct btf *)priv; + const struct btf_type *ta = btf_type_by_id(btf, *(__u32 *)a); + const struct btf_type *tb = btf_type_by_id(btf, *(__u32 *)b); + const char *na, *nb; + + if (!ta->name_off && tb->name_off) + return 1; + if (ta->name_off && !tb->name_off) + return -1; + if (!ta->name_off && !tb->name_off) + return 0; + + na = btf_name_by_offset(btf, ta->name_off); + nb = btf_name_by_offset(btf, tb->name_off); + return strcmp(na, nb); +} + +/* Verifies that BTF types are sorted in ascending order according to their + * names, with named types appearing before anonymous types. If the ordering + * is correct, counts the number of named types and updates the BTF object's + * nr_sorted_types field. + * + * Return: true if types are properly sorted, false otherwise + */ +static bool btf_check_sorted(struct btf *btf) +{ + const struct btf_type *t; + int i, n, k = 0, nr_sorted_types; + + if (likely(btf->nr_sorted_types != BTF_NEED_SORT_CHECK)) + goto out; + btf->nr_sorted_types = 0; + + if (btf->nr_types < 2) + goto out; + + nr_sorted_types = 0; + n = btf_nr_types(btf) - 1; + for (i = btf_start_id(btf); i < n; i++) { + k = i + 1; + if (btf_compare_type_names(&i, &k, btf) > 0) + goto out; + + t = btf_type_by_id(btf, i); + if (t->name_off) + nr_sorted_types++; + } + + t = btf_type_by_id(btf, k); + if (t->name_off) + nr_sorted_types++; + if (nr_sorted_types) + btf->nr_sorted_types = nr_sorted_types; + +out: + return btf->nr_sorted_types > 0; +} + /* Performs binary search within specified type ID range to find the leftmost * BTF type matching the given name. The search assumes types are sorted by * name in lexicographical order within the specified range. 
@@ -610,7 +674,7 @@ s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind) goto out; } - if (btf->nr_sorted_types != BTF_NEED_SORT_CHECK) { + if (btf_check_sorted((struct btf *)btf)) { /* binary search */ bool skip_first; s32 start_id, end_id; -- 2.34.1 From: Donglin Peng This patch introduces test cases for the btf__permute function to ensure it works correctly with both base BTF and split BTF scenarios. The test suite includes: - test_permute_base: Validates permutation on base BTF - test_permute_split: Tests permutation on split BTF - test_permute_drop_base: Validates type dropping on base BTF - test_permute_drop_split: Tests type dropping on split BTF Cc: Eduard Zingerman Cc: Alexei Starovoitov Cc: Andrii Nakryiko Cc: Alan Maguire Cc: Song Liu Cc: Xiaoqin Zhang Signed-off-by: Donglin Peng Signed-off-by: Donglin Peng Acked-by: Eduard Zingerman --- .../selftests/bpf/prog_tests/btf_permute.c | 279 ++++++++++++++++++ 1 file changed, 279 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/btf_permute.c diff --git a/tools/testing/selftests/bpf/prog_tests/btf_permute.c b/tools/testing/selftests/bpf/prog_tests/btf_permute.c new file mode 100644 index 000000000000..edab03742598 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/btf_permute.c @@ -0,0 +1,279 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Xiaomi */ + +#include <test_progs.h> +#include <bpf/btf.h> +#include "btf_helpers.h" + +/* Ensure btf__permute works as expected with base_btf */ +static void test_permute_base(void) +{ + struct btf *btf; + __u32 permute_ids[6]; + int err; + + btf = btf__new_empty(); + if (!ASSERT_OK_PTR(btf, "empty_main_btf")) + return; + + btf__add_int(btf, "int", 4, BTF_INT_SIGNED); /* [1] int */ + btf__add_ptr(btf, 1); /* [2] ptr to int */ + btf__add_struct(btf, "s1", 4); /* [3] struct s1 { */ + btf__add_field(btf, "m", 1, 0, 0); /* int m; */ + /* } */ + btf__add_struct(btf, "s2", 4); /* [4] struct s2 { */ + btf__add_field(btf, "m", 1, 0, 0); /* 
int m; */ + /* } */ + btf__add_func_proto(btf, 1); /* [5] int (*)(int *p); */ + btf__add_func_param(btf, "p", 2); + btf__add_func(btf, "f", BTF_FUNC_STATIC, 5); /* [6] int f(int *p); */ + + VALIDATE_RAW_BTF( + btf, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] PTR '(anon)' type_id=1", + "[3] STRUCT 's1' size=4 vlen=1\n" + "\t'm' type_id=1 bits_offset=0", + "[4] STRUCT 's2' size=4 vlen=1\n" + "\t'm' type_id=1 bits_offset=0", + "[5] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n" + "\t'p' type_id=2", + "[6] FUNC 'f' type_id=5 linkage=static"); + + permute_ids[0] = 4; /* struct s2 */ + permute_ids[1] = 3; /* struct s1 */ + permute_ids[2] = 5; /* int (*)(int *p) */ + permute_ids[3] = 1; /* int */ + permute_ids[4] = 6; /* int f(int *p) */ + permute_ids[5] = 2; /* ptr to int */ + err = btf__permute(btf, permute_ids, ARRAY_SIZE(permute_ids), NULL); + if (!ASSERT_OK(err, "btf__permute_base")) + goto done; + + VALIDATE_RAW_BTF( + btf, + "[1] STRUCT 's2' size=4 vlen=1\n" + "\t'm' type_id=4 bits_offset=0", + "[2] STRUCT 's1' size=4 vlen=1\n" + "\t'm' type_id=4 bits_offset=0", + "[3] FUNC_PROTO '(anon)' ret_type_id=4 vlen=1\n" + "\t'p' type_id=6", + "[4] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[5] FUNC 'f' type_id=3 linkage=static", + "[6] PTR '(anon)' type_id=4"); + +done: + btf__free(btf); +} + +/* Ensure btf__permute work as expected with split_btf */ +static void test_permute_split(void) +{ + struct btf *split_btf = NULL, *base_btf = NULL; + __u32 permute_ids[4]; + int err; + + base_btf = btf__new_empty(); + if (!ASSERT_OK_PTR(base_btf, "empty_main_btf")) + return; + + btf__add_int(base_btf, "int", 4, BTF_INT_SIGNED); /* [1] int */ + btf__add_ptr(base_btf, 1); /* [2] ptr to int */ + VALIDATE_RAW_BTF( + base_btf, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] PTR '(anon)' type_id=1"); + split_btf = btf__new_empty_split(base_btf); + if (!ASSERT_OK_PTR(split_btf, "empty_split_btf")) + goto cleanup; + 
btf__add_struct(split_btf, "s1", 4); /* [3] struct s1 { */ + btf__add_field(split_btf, "m", 1, 0, 0); /* int m; */ + /* } */ + btf__add_struct(split_btf, "s2", 4); /* [4] struct s2 { */ + btf__add_field(split_btf, "m", 1, 0, 0); /* int m; */ + /* } */ + btf__add_func_proto(split_btf, 1); /* [5] int (*)(int *p); */ + btf__add_func_param(split_btf, "p", 2); + btf__add_func(split_btf, "f", BTF_FUNC_STATIC, 5); /* [6] int f(int *p); */ + + VALIDATE_RAW_BTF( + split_btf, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] PTR '(anon)' type_id=1", + "[3] STRUCT 's1' size=4 vlen=1\n" + "\t'm' type_id=1 bits_offset=0", + "[4] STRUCT 's2' size=4 vlen=1\n" + "\t'm' type_id=1 bits_offset=0", + "[5] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n" + "\t'p' type_id=2", + "[6] FUNC 'f' type_id=5 linkage=static"); + + permute_ids[0] = 6; /* int f(int *p) */ + permute_ids[1] = 3; /* struct s1 */ + permute_ids[2] = 5; /* int (*)(int *p) */ + permute_ids[3] = 4; /* struct s2 */ + err = btf__permute(split_btf, permute_ids, ARRAY_SIZE(permute_ids), NULL); + if (!ASSERT_OK(err, "btf__permute_split")) + goto cleanup; + + VALIDATE_RAW_BTF( + split_btf, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] PTR '(anon)' type_id=1", + "[3] FUNC 'f' type_id=5 linkage=static", + "[4] STRUCT 's1' size=4 vlen=1\n" + "\t'm' type_id=1 bits_offset=0", + "[5] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n" + "\t'p' type_id=2", + "[6] STRUCT 's2' size=4 vlen=1\n" + "\t'm' type_id=1 bits_offset=0"); + +cleanup: + btf__free(split_btf); + btf__free(base_btf); +} + +/* Verify btf__permute function drops types correctly with base_btf */ +static void test_permute_drop_base(void) +{ + struct btf *btf; + __u32 permute_ids[6]; + int err; + + btf = btf__new_empty(); + if (!ASSERT_OK_PTR(btf, "empty_main_btf")) + return; + + btf__add_int(btf, "int", 4, BTF_INT_SIGNED); /* [1] int */ + btf__add_ptr(btf, 1); /* [2] ptr to int */ + btf__add_struct(btf, "s1", 4); /* [3] struct s1 { 
*/ + btf__add_field(btf, "m", 1, 0, 0); /* int m; */ + /* } */ + btf__add_struct(btf, "s2", 4); /* [4] struct s2 { */ + btf__add_field(btf, "m", 1, 0, 0); /* int m; */ + /* } */ + btf__add_func_proto(btf, 1); /* [5] int (*)(int *p); */ + btf__add_func_param(btf, "p", 2); + btf__add_func(btf, "f", BTF_FUNC_STATIC, 5); /* [6] int f(int *p); */ + + VALIDATE_RAW_BTF( + btf, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] PTR '(anon)' type_id=1", + "[3] STRUCT 's1' size=4 vlen=1\n" + "\t'm' type_id=1 bits_offset=0", + "[4] STRUCT 's2' size=4 vlen=1\n" + "\t'm' type_id=1 bits_offset=0", + "[5] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n" + "\t'p' type_id=2", + "[6] FUNC 'f' type_id=5 linkage=static"); + + permute_ids[0] = 4; /* struct s2 */ + permute_ids[1] = 2; /* ptr to int */ + permute_ids[2] = 5; /* int (*)(int *p) */ + permute_ids[3] = 1; /* int */ + err = btf__permute(btf, permute_ids, 4, NULL); + if (!ASSERT_OK(err, "btf__permute_drop_base")) + goto done; + + VALIDATE_RAW_BTF( + btf, + "[1] STRUCT 's2' size=4 vlen=1\n" + "\t'm' type_id=4 bits_offset=0", + "[2] PTR '(anon)' type_id=4", + "[3] FUNC_PROTO '(anon)' ret_type_id=4 vlen=1\n" + "\t'p' type_id=2", + "[4] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED"); + + permute_ids[0] = 4; /* struct s2 */ + permute_ids[1] = 5; /* int (*)(int *p) */ + permute_ids[2] = 1; /* int */ + err = btf__permute(btf, permute_ids, 3, NULL); + if (!ASSERT_ERR(err, "btf__permute_drop_base_fail")) + goto done; + +done: + btf__free(btf); +} + +/* Verify btf__permute function drops types correctly with split_btf */ +static void test_permute_drop_split(void) +{ + struct btf *split_btf = NULL, *base_btf = NULL; + __u32 permute_ids[4]; + int err; + + base_btf = btf__new_empty(); + if (!ASSERT_OK_PTR(base_btf, "empty_main_btf")) + return; + + btf__add_int(base_btf, "int", 4, BTF_INT_SIGNED); /* [1] int */ + btf__add_ptr(base_btf, 1); /* [2] ptr to int */ + VALIDATE_RAW_BTF( + base_btf, + "[1] INT 'int' 
size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] PTR '(anon)' type_id=1"); + split_btf = btf__new_empty_split(base_btf); + if (!ASSERT_OK_PTR(split_btf, "empty_split_btf")) + goto cleanup; + btf__add_struct(split_btf, "s1", 4); /* [3] struct s1 { */ + btf__add_field(split_btf, "m", 1, 0, 0); /* int m; */ + /* } */ + btf__add_struct(split_btf, "s2", 4); /* [4] struct s2 { */ + btf__add_field(split_btf, "m", 1, 0, 0); /* int m; */ + /* } */ + btf__add_func_proto(split_btf, 1); /* [5] int (*)(int *p); */ + btf__add_func_param(split_btf, "p", 2); + btf__add_func(split_btf, "f", BTF_FUNC_STATIC, 5); /* [6] int f(int *p); */ + + VALIDATE_RAW_BTF( + split_btf, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] PTR '(anon)' type_id=1", + "[3] STRUCT 's1' size=4 vlen=1\n" + "\t'm' type_id=1 bits_offset=0", + "[4] STRUCT 's2' size=4 vlen=1\n" + "\t'm' type_id=1 bits_offset=0", + "[5] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n" + "\t'p' type_id=2", + "[6] FUNC 'f' type_id=5 linkage=static"); + + permute_ids[0] = 6; /* int f(int *p) */ + permute_ids[1] = 3; /* struct s1 */ + permute_ids[2] = 5; /* int (*)(int *p) */ + err = btf__permute(split_btf, permute_ids, 3, NULL); + if (!ASSERT_OK(err, "btf__permute_drop_split")) + goto cleanup; + + VALIDATE_RAW_BTF( + split_btf, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] PTR '(anon)' type_id=1", + "[3] FUNC 'f' type_id=5 linkage=static", + "[4] STRUCT 's1' size=4 vlen=1\n" + "\t'm' type_id=1 bits_offset=0", + "[5] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n" + "\t'p' type_id=2"); + + permute_ids[0] = 6; /* int f(int *p) */ + permute_ids[1] = 3; /* struct s1 */ + err = btf__permute(split_btf, permute_ids, 2, NULL); + if (!ASSERT_ERR(err, "btf__permute_drop_split_fail")) + goto cleanup; + +cleanup: + btf__free(split_btf); + btf__free(base_btf); +} + +void test_btf_permute(void) +{ + if (test__start_subtest("permute_base")) + test_permute_base(); + if 
(test__start_subtest("permute_split")) + test_permute_split(); + if (test__start_subtest("permute_drop_base")) + test_permute_drop_base(); + if (test__start_subtest("permute_drop_split")) + test_permute_drop_split(); +} -- 2.34.1