state_show() currently reads kdamond->damon_ctx without holding
damon_sysfs_lock. This creates a use-after-free race condition:

CPU 0                          CPU 1
-----                          -----
state_show()                   damon_sysfs_turn_damon_on()
ctx = kdamond->damon_ctx;      mutex_lock(&damon_sysfs_lock);
                               damon_destroy_ctx(kdamond->damon_ctx);
                               kdamond->damon_ctx = NULL;
                               mutex_unlock(&damon_sysfs_lock);
damon_is_running(ctx);         /* ctx is freed */
mutex_lock(&ctx->kdamond_lock); /* UAF */

The same race exists against other functions that free or replace the
context while holding damon_sysfs_lock, such as
damon_sysfs_kdamonds_rm_dirs() and damon_sysfs_kdamond_release().

Fix this by acquiring damon_sysfs_lock before accessing the context,
mirroring the locking pattern used in pid_show().

The vulnerability has been present since state_show() was first
introduced to access kdamond->damon_ctx.

Fixes: a61ea561c871 ("mm/damon/sysfs: link DAMON for virtual address spaces monitoring")
Reported-by: Stanislav Fort
Signed-off-by: Stanislav Fort
---
 mm/damon/sysfs.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
index 1234567..abcdef0 100644
--- a/mm/damon/sysfs.c
+++ b/mm/damon/sysfs.c
@@ -1258,17 +1258,21 @@
 static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
 		char *buf)
 {
 	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
 			struct damon_sysfs_kdamond, kobj);
-	struct damon_ctx *ctx = kdamond->damon_ctx;
-	bool running;
-	if (!ctx)
-		running = false;
-	else
-		running = damon_is_running(ctx);
+	struct damon_ctx *ctx;
+	bool running = false;
+
+	if (!mutex_trylock(&damon_sysfs_lock))
+		return -EBUSY;
+
+	ctx = kdamond->damon_ctx;
+	if (ctx)
+		running = damon_is_running(ctx);
+	mutex_unlock(&damon_sysfs_lock);
 	return sysfs_emit(buf, "%s\n", running ?
 			damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_ON] :
 			damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_OFF]);
 }
 
 static int damon_sysfs_set_attrs(struct damon_ctx *ctx,
-- 
2.34.1
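
For reviewers' reference, the pid_show() locking pattern that this fix
mirrors looks roughly like the following. This is a simplified sketch of
the reader path in mm/damon/sysfs.c, not a verbatim copy; the exact body
may differ across kernel versions:

	static ssize_t pid_show(struct kobject *kobj,
			struct kobj_attribute *attr, char *buf)
	{
		struct damon_sysfs_kdamond *kdamond = container_of(kobj,
				struct damon_sysfs_kdamond, kobj);
		struct damon_ctx *ctx;
		int pid = -1;

		/* Hold damon_sysfs_lock so the ctx cannot be freed under us. */
		if (!mutex_trylock(&damon_sysfs_lock))
			return -EBUSY;
		ctx = kdamond->damon_ctx;
		if (!ctx)
			goto out;
		mutex_lock(&ctx->kdamond_lock);
		if (ctx->kdamond)
			pid = ctx->kdamond->pid;
		mutex_unlock(&ctx->kdamond_lock);
	out:
		mutex_unlock(&damon_sysfs_lock);
		return sysfs_emit(buf, "%d\n", pid);
	}

The key point is that kdamond->damon_ctx is only read and dereferenced
while damon_sysfs_lock is held, which is exactly what state_show() does
after this patch.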