The bpf_arena_spin_lock.h header is useful for all programs and not just the selftests. Move it to the top level of the BPF selftests to make it more readily accessible. Signed-off-by: Emil Tsalapatis (Meta) <emil@etsalapatis.com> --- tools/testing/selftests/bpf/{progs => }/bpf_arena_spin_lock.h | 4 ++-- tools/testing/selftests/bpf/progs/arena_spin_lock.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) rename tools/testing/selftests/bpf/{progs => }/bpf_arena_spin_lock.h (99%) diff --git a/tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h b/tools/testing/selftests/bpf/bpf_arena_spin_lock.h similarity index 99% rename from tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h rename to tools/testing/selftests/bpf/bpf_arena_spin_lock.h index f90531cf3ee5..680c9e6cb35d 100644 --- a/tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h +++ b/tools/testing/selftests/bpf/bpf_arena_spin_lock.h @@ -107,7 +107,7 @@ struct arena_qnode { #define _Q_LOCKED_VAL (1U << _Q_LOCKED_OFFSET) #define _Q_PENDING_VAL (1U << _Q_PENDING_OFFSET) -struct arena_qnode __arena qnodes[_Q_MAX_CPUS][_Q_MAX_NODES]; +struct arena_qnode __weak __arena __hidden qnodes[_Q_MAX_CPUS][_Q_MAX_NODES]; static inline u32 encode_tail(int cpu, int idx) { @@ -240,7 +240,7 @@ static __always_inline int arena_spin_trylock(arena_spinlock_t __arena *lock) return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)); } -__noinline +__noinline __weak int arena_spin_lock_slowpath(arena_spinlock_t __arena __arg_arena *lock, u32 val) { struct arena_mcs_spinlock __arena *prev, *next, *node0, *node; diff --git a/tools/testing/selftests/bpf/progs/arena_spin_lock.c b/tools/testing/selftests/bpf/progs/arena_spin_lock.c index 086b57a426cf..6c04e7707644 100644 --- a/tools/testing/selftests/bpf/progs/arena_spin_lock.c +++ b/tools/testing/selftests/bpf/progs/arena_spin_lock.c @@ -4,7 +4,7 @@ #include <vmlinux.h> #include <bpf/bpf_helpers.h> #include "bpf_misc.h" -#include "bpf_arena_spin_lock.h" +#include "../bpf_arena_spin_lock.h" struct { 
__uint(type, BPF_MAP_TYPE_ARENA); -- 2.47.3