kdamond_split_regions() uses a function-level static variable to remember
the previous iteration's region count.  This is shared across all kdamond
threads, so one kdamond's region count leaks into another's splitting
decision.  Move the variable into struct damon_ctx so each kdamond tracks
its own state.

Signed-off-by: Josh Law
---
 include/linux/damon.h | 2 ++
 mm/damon/core.c       | 5 ++---
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/include/linux/damon.h b/include/linux/damon.h
index 438fe6f3eab4..85fe33ce7be4 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -803,6 +803,8 @@ struct damon_ctx {
 	struct completion kdamond_started;
 	/* for scheme quotas prioritization */
 	unsigned long *regions_score_histogram;
+	/* for kdamond_split_regions() heuristic */
+	unsigned int last_nr_regions;
 
 	/* lists of &struct damon_call_control */
 	struct list_head call_controls;
diff --git a/mm/damon/core.c b/mm/damon/core.c
index 6c1f738e265c..50e8bdeb70dd 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -2722,7 +2722,6 @@ static void kdamond_split_regions(struct damon_ctx *ctx)
 {
 	struct damon_target *t;
 	unsigned int nr_regions = 0;
-	static unsigned int last_nr_regions;
 	int nr_subregions = 2;
 
 	damon_for_each_target(t, ctx)
@@ -2732,14 +2731,14 @@ static void kdamond_split_regions(struct damon_ctx *ctx)
 		return;
 
 	/* Maybe the middle of the region has different access frequency */
-	if (last_nr_regions == nr_regions &&
+	if (ctx->last_nr_regions == nr_regions &&
 			nr_regions < ctx->attrs.max_nr_regions / 3)
 		nr_subregions = 3;
 
 	damon_for_each_target(t, ctx)
 		damon_split_regions_of(t, nr_subregions, ctx->min_region_sz);
 
-	last_nr_regions = nr_regions;
+	ctx->last_nr_regions = nr_regions;
 }
 
 /*
-- 
2.34.1