Three functions (damos_apply_scheme, damos_trace_esz, damos_trace_stat)
contain identical loops walking the schemes list to find a scheme's
index for tracing.  Extract the common pattern into damon_scheme_idx().

Signed-off-by: Josh Law
---
 mm/damon/core.c | 41 +++++++++++++++++------------------------
 1 file changed, 17 insertions(+), 24 deletions(-)

diff --git a/mm/damon/core.c b/mm/damon/core.c
index 6ee421141996..56372e577931 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -2045,6 +2045,19 @@ static void damos_walk_cancel(struct damon_ctx *ctx)
 	mutex_unlock(&ctx->walk_control_lock);
 }
 
+static unsigned int damon_scheme_idx(struct damon_ctx *c, struct damos *s)
+{
+	unsigned int idx = 0;
+	struct damos *siter;
+
+	damon_for_each_scheme(siter, c) {
+		if (siter == s)
+			break;
+		idx++;
+	}
+	return idx;
+}
+
 static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
 		struct damon_region *r, struct damos *s)
 {
@@ -2060,7 +2073,6 @@ static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
 	 * index here.
 	 */
 	unsigned int cidx = 0;
-	struct damos *siter;	/* schemes iterator */
 	unsigned int sidx = 0;
 	struct damon_target *titer;	/* targets iterator */
 	unsigned int tidx = 0;
@@ -2068,11 +2080,7 @@ static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
 
 	/* get indices for trace_damos_before_apply() */
 	if (trace_damos_before_apply_enabled()) {
-		damon_for_each_scheme(siter, c) {
-			if (siter == s)
-				break;
-			sidx++;
-		}
+		sidx = damon_scheme_idx(c, s);
 		damon_for_each_target(titer, c) {
 			if (titer == t)
 				break;
@@ -2394,15 +2402,7 @@ static void damos_set_effective_quota(struct damos_quota *quota)
 static void damos_trace_esz(struct damon_ctx *c, struct damos *s,
 		struct damos_quota *quota)
 {
-	unsigned int cidx = 0, sidx = 0;
-	struct damos *siter;
-
-	damon_for_each_scheme(siter, c) {
-		if (siter == s)
-			break;
-		sidx++;
-	}
-	trace_damos_esz(cidx, sidx, quota->esz);
+	trace_damos_esz(0, damon_scheme_idx(c, s), quota->esz);
 }
 
 static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
@@ -2469,18 +2469,11 @@ static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
 }
 
 static void damos_trace_stat(struct damon_ctx *c, struct damos *s)
 {
-	unsigned int cidx = 0, sidx = 0;
-	struct damos *siter;
-
 	if (!trace_damos_stat_after_apply_interval_enabled())
 		return;
-	damon_for_each_scheme(siter, c) {
-		if (siter == s)
-			break;
-		sidx++;
-	}
-	trace_damos_stat_after_apply_interval(cidx, sidx, &s->stat);
+	trace_damos_stat_after_apply_interval(0, damon_scheme_idx(c, s),
+			&s->stat);
 }
 
 static void kdamond_apply_schemes(struct damon_ctx *c)
--
2.34.1
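The helper's shape is the textbook walk-and-count index lookup on an
intrusive singly linked list.  For readers who want to poke at the idiom
in isolation, here is a minimal userspace sketch.  It is not DAMON code:
struct scheme, for_each_scheme(), and scheme_idx() are invented stand-ins
for struct damos, damon_for_each_scheme(), and the new damon_scheme_idx().

#include <stddef.h>
#include <stdio.h>

/* Stand-in for struct damos entries on ctx->schemes. */
struct scheme {
	struct scheme *next;
};

/* Stand-in for damon_for_each_scheme(siter, ctx). */
#define for_each_scheme(s, head) \
	for ((s) = (head); (s) != NULL; (s) = (s)->next)

/*
 * Same shape as damon_scheme_idx(): walk from the head until the wanted
 * node is found, counting hops along the way.
 */
static unsigned int scheme_idx(struct scheme *head, struct scheme *s)
{
	unsigned int idx = 0;
	struct scheme *siter;

	for_each_scheme(siter, head) {
		if (siter == s)
			break;
		idx++;
	}
	return idx;
}

int main(void)
{
	/* Build a three-element list: a -> b -> c. */
	struct scheme c = { NULL }, b = { &c }, a = { &b };

	printf("%u %u %u\n", scheme_idx(&a, &a), scheme_idx(&a, &b),
	       scheme_idx(&a, &c));	/* prints: 0 1 2 */
	return 0;
}

One boundary case worth noting, which the kernel helper shares: a node
that is not on the list yields the list length rather than an error.
That is harmless here because callers only ever pass a scheme taken from
the same context's schemes list.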
damon_commit_target_regions() walks the source region list twice: once
to count regions and again to copy their address ranges.  Use the
already-maintained damon_nr_regions() instead of the counting traversal.

Signed-off-by: Josh Law
---
 mm/damon/core.c | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/mm/damon/core.c b/mm/damon/core.c
index 56372e577931..6c1f738e265c 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -1217,17 +1217,15 @@ static int damon_commit_target_regions(struct damon_target *dst,
 		struct damon_target *src, unsigned long src_min_region_sz)
 {
 	struct damon_region *src_region;
 	struct damon_addr_range *ranges;
+	unsigned int nr = damon_nr_regions(src);
 	int i = 0, err;
 
-	damon_for_each_region(src_region, src)
-		i++;
-	if (!i)
+	if (!nr)
 		return 0;
-	ranges = kmalloc_objs(*ranges, i, GFP_KERNEL | __GFP_NOWARN);
+	ranges = kmalloc_objs(*ranges, nr, GFP_KERNEL | __GFP_NOWARN);
 	if (!ranges)
 		return -ENOMEM;
-	i = 0;
 	damon_for_each_region(src_region, src)
 		ranges[i++] = src_region->ar;
 	err = damon_set_regions(dst, ranges, i, src_min_region_sz);
--
2.34.1

kdamond_split_regions() uses a function-level static variable to
remember the previous iteration's region count.  This is shared across
all kdamond threads, so one kdamond's region count leaks into another's
splitting decision.  Move the variable into struct damon_ctx so each
kdamond tracks its own state.

Signed-off-by: Josh Law
---
 include/linux/damon.h | 2 ++
 mm/damon/core.c       | 5 ++---
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/include/linux/damon.h b/include/linux/damon.h
index 438fe6f3eab4..85fe33ce7be4 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -803,6 +803,8 @@ struct damon_ctx {
 	struct completion kdamond_started;
 	/* for scheme quotas prioritization */
 	unsigned long *regions_score_histogram;
+	/* for kdamond_split_regions() heuristic */
+	unsigned int last_nr_regions;
 
 	/* lists of &struct damon_call_control */
 	struct list_head call_controls;
diff --git a/mm/damon/core.c b/mm/damon/core.c
index 6c1f738e265c..50e8bdeb70dd 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -2722,7 +2722,6 @@ static void kdamond_split_regions(struct damon_ctx *ctx)
 {
 	struct damon_target *t;
 	unsigned int nr_regions = 0;
-	static unsigned int last_nr_regions;
 	int nr_subregions = 2;
 
 	damon_for_each_target(t, ctx)
@@ -2732,14 +2731,14 @@ static void kdamond_split_regions(struct damon_ctx *ctx)
 		return;
 
 	/* Maybe the middle of the region has different access frequency */
-	if (last_nr_regions == nr_regions &&
+	if (ctx->last_nr_regions == nr_regions &&
 			nr_regions < ctx->attrs.max_nr_regions / 3)
 		nr_subregions = 3;
 
 	damon_for_each_target(t, ctx)
 		damon_split_regions_of(t, nr_subregions, ctx->min_region_sz);
 
-	last_nr_regions = nr_regions;
+	ctx->last_nr_regions = nr_regions;
 }
 
 /*
--
2.34.1
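The bug class fixed by the last patch is easy to reproduce outside the
kernel.  In the sketch below all names are invented (struct ctx,
should_split_3_shared(), should_split_3()), and the int return value
stands in for the nr_subregions = 3 decision; it illustrates the
pattern, it is not DAMON code.

#include <stdio.h>

/* Per-context state, as in the patched struct damon_ctx. */
struct ctx {
	unsigned int last_nr;	/* mirrors ctx->last_nr_regions */
};

/* Broken shape: one function-level static shared by every caller. */
static int should_split_3_shared(unsigned int nr)
{
	static unsigned int last_nr;	/* leaks state across contexts */
	int stuck = (last_nr == nr);

	last_nr = nr;
	return stuck;
}

/* Fixed shape: each context remembers its own previous count. */
static int should_split_3(struct ctx *c, unsigned int nr)
{
	int stuck = (c->last_nr == nr);

	c->last_nr = nr;
	return stuck;
}

int main(void)
{
	struct ctx a = { 0 }, b = { 0 };

	/* Context A sees 10 regions; it will see 10 again. */
	should_split_3_shared(10);
	/* Context B's unrelated call overwrites A's history ... */
	should_split_3_shared(7);
	/* ... so A's repeat goes undetected. */
	printf("shared: %d\n", should_split_3_shared(10));	/* 0: wrong */

	/* With per-context state, the interleaving is harmless. */
	should_split_3(&a, 10);
	should_split_3(&b, 7);
	printf("per-ctx: %d\n", should_split_3(&a, 10));	/* 1: right */
	return 0;
}

Beyond the stale-heuristic result shown above, the shared static was
also written from multiple kdamond threads without synchronization;
making the field per-context removes that data race too, since each
kdamond only touches its own struct damon_ctx.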