Enable capability analysis for stackdepot.
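
The annotations make the locking rules checkable at compile time: with
pool_offset marked __guarded_by(&pool_lock), Clang's thread-safety
based capability analysis rejects unlocked accesses. As a rough sketch
(the functions below are hypothetical and not part of this patch;
diagnostic wording is approximate):

	/* Unlocked write: now rejected at compile time. */
	static void buggy(void)
	{
		pool_offset = 0;	/* warning: writing variable
					 * 'pool_offset' requires holding
					 * 'pool_lock' exclusively
					 */
	}

	/* Acquiring pool_lock satisfies the analysis. */
	static void fine(void)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&pool_lock, flags);
		pool_offset = 0;
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}

Where an access is deliberately lockless (the stack_pools[] read in
depot_fetch_stack()), capability_unsafe() opts that single expression
out of the analysis.
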
Signed-off-by: Marco Elver <elver@google.com>
---
v2:
* Remove disable/enable_capability_analysis() around headers.
---
 lib/Makefile     |  1 +
 lib/stackdepot.c | 20 ++++++++++++++------
 2 files changed, 15 insertions(+), 6 deletions(-)

diff --git a/lib/Makefile b/lib/Makefile
index e677cb5cc777..43b965046c2c 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -251,6 +251,7 @@ obj-$(CONFIG_POLYNOMIAL) += polynomial.o
 # Prevent the compiler from calling builtins like memcmp() or bcmp() from this
 # file.
 CFLAGS_stackdepot.o += -fno-builtin
+CAPABILITY_ANALYSIS_stackdepot.o := y
 obj-$(CONFIG_STACKDEPOT) += stackdepot.o
 KASAN_SANITIZE_stackdepot.o := n
 # In particular, instrumenting stackdepot.c with KMSAN will result in infinite
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index de0b0025af2b..43122294f128 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -61,18 +61,18 @@ static unsigned int stack_bucket_number_order;
 /* Hash mask for indexing the table. */
 static unsigned int stack_hash_mask;
 
+/* The lock must be held when performing pool or freelist modifications. */
+static DEFINE_RAW_SPINLOCK(pool_lock);
 /* Array of memory regions that store stack records. */
-static void **stack_pools;
+static void **stack_pools __pt_guarded_by(&pool_lock);
 /* Newly allocated pool that is not yet added to stack_pools. */
 static void *new_pool;
 /* Number of pools in stack_pools. */
 static int pools_num;
 /* Offset to the unused space in the currently used pool. */
-static size_t pool_offset = DEPOT_POOL_SIZE;
+static size_t pool_offset __guarded_by(&pool_lock) = DEPOT_POOL_SIZE;
 /* Freelist of stack records within stack_pools. */
-static LIST_HEAD(free_stacks);
-/* The lock must be held when performing pool or freelist modifications. */
-static DEFINE_RAW_SPINLOCK(pool_lock);
+static __guarded_by(&pool_lock) LIST_HEAD(free_stacks);
 
 /* Statistics counters for debugfs. */
 enum depot_counter_id {
@@ -291,6 +291,7 @@ EXPORT_SYMBOL_GPL(stack_depot_init);
  * Initializes new stack pool, and updates the list of pools.
  */
 static bool depot_init_pool(void **prealloc)
+	__must_hold(&pool_lock)
 {
 	lockdep_assert_held(&pool_lock);
 
@@ -338,6 +339,7 @@ static bool depot_init_pool(void **prealloc)
 
 /* Keeps the preallocated memory to be used for a new stack depot pool. */
 static void depot_keep_new_pool(void **prealloc)
+	__must_hold(&pool_lock)
 {
 	lockdep_assert_held(&pool_lock);
 
@@ -357,6 +359,7 @@ static void depot_keep_new_pool(void **prealloc)
  * the current pre-allocation.
  */
 static struct stack_record *depot_pop_free_pool(void **prealloc, size_t size)
+	__must_hold(&pool_lock)
 {
 	struct stack_record *stack;
 	void *current_pool;
@@ -391,6 +394,7 @@ static struct stack_record *depot_pop_free_pool(void **prealloc, size_t size)
 
 /* Try to find next free usable entry from the freelist. */
 static struct stack_record *depot_pop_free(void)
+	__must_hold(&pool_lock)
 {
 	struct stack_record *stack;
 
@@ -428,6 +432,7 @@ static inline size_t depot_stack_record_size(struct stack_record *s, unsigned in
 /* Allocates a new stack in a stack depot pool. */
 static struct stack_record *
 depot_alloc_stack(unsigned long *entries, unsigned int nr_entries, u32 hash, depot_flags_t flags, void **prealloc)
+	__must_hold(&pool_lock)
 {
 	struct stack_record *stack = NULL;
 	size_t record_size;
@@ -486,6 +491,7 @@ depot_alloc_stack(unsigned long *entries, unsigned int nr_entries, u32 hash, dep
 }
 
 static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
+	__must_not_hold(&pool_lock)
 {
 	const int pools_num_cached = READ_ONCE(pools_num);
 	union handle_parts parts = { .handle = handle };
@@ -502,7 +508,8 @@ static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
 		return NULL;
 	}
 
-	pool = stack_pools[pool_index];
+	/* @pool_index either valid, or user passed in corrupted value. */
+	pool = capability_unsafe(stack_pools[pool_index]);
 	if (WARN_ON(!pool))
 		return NULL;
 
@@ -515,6 +522,7 @@ static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
 
 /* Links stack into the freelist. */
 static void depot_free_stack(struct stack_record *stack)
+	__must_not_hold(&pool_lock)
 {
 	unsigned long flags;
 
-- 
2.51.0.384.g4c02a37b29-goog