Introduce an optional damon_operations callback, get_goal_metric(), that
lets ops providers compute goal metrics requiring address-space
knowledge.  Provide a PA implementation that handles
DAMOS_QUOTA_NODE_SYS_BP by iterating the monitored PFN regions and
attributing bytes to the goal's nid.  The core remains generic and asks
the ops only when needed.

Signed-off-by: Ravi Jonnalagadda
---
 include/linux/damon.h |  3 +++
 mm/damon/paddr.c      | 58 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 61 insertions(+)

diff --git a/include/linux/damon.h b/include/linux/damon.h
index ec5ed1a217fc..67233898c27c 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -649,6 +649,9 @@ struct damon_operations {
 	bool (*target_valid)(struct damon_target *t);
 	void (*cleanup_target)(struct damon_target *t);
 	void (*cleanup)(struct damon_ctx *context);
+	unsigned long (*get_goal_metric)(struct damon_ctx *ctx,
+			struct damos *scheme,
+			const struct damos_quota_goal *goal);
 };
 
 /*
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index 07a8aead439e..30e4e5663dcb 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -14,6 +14,8 @@
 #include
 #include
 #include
+#include <linux/mm.h>
+#include <linux/math64.h>
 
 #include "../internal.h"
 #include "ops-common.h"
@@ -148,6 +150,48 @@ static bool damon_pa_invalid_damos_folio(struct folio *folio, struct damos *s)
 	return false;
 }
 
+/* System total RAM in bytes (denominator for the bp computation) */
+static unsigned long damon_pa_totalram_bytes(void)
+{
+	return (unsigned long)totalram_pages() << PAGE_SHIFT;
+}
+
+/*
+ * Compute node-scoped system bp for PA contexts:
+ * bp = (bytes attributed to goal->nid across monitored PA regions) /
+ *      (system total bytes) * 10000
+ */
+static unsigned long damon_pa_get_node_sys_bp(struct damon_ctx *ctx,
+		struct damos *scheme,
+		const struct damos_quota_goal *goal)
+{
+	int nid = goal ? goal->nid : -1;
+	unsigned long node_bytes = 0;
+	unsigned long total_bytes = damon_pa_totalram_bytes();
+	struct damon_target *t;
+	struct damon_region *r;
+
+	if (nid < 0 || !total_bytes)
+		return 0;
+
+	damon_for_each_target(t, ctx) {
+		damon_for_each_region(r, t) {
+			unsigned long start_pfn = r->ar.start >> PAGE_SHIFT;
+			unsigned long end_pfn = r->ar.end >> PAGE_SHIFT;
+			unsigned long pfn;
+
+			for (pfn = start_pfn; pfn < end_pfn; pfn++) {
+				if (!pfn_valid(pfn))
+					continue;
+				if (page_to_nid(pfn_to_page(pfn)) == nid)
+					node_bytes += PAGE_SIZE;
+			}
+		}
+	}
+
+	return div64_u64((u64)node_bytes * 10000ULL, total_bytes);
+}
+
 static unsigned long damon_pa_pageout(struct damon_region *r,
 		unsigned long addr_unit, struct damos *s,
 		unsigned long *sz_filter_passed)
@@ -344,6 +388,19 @@ static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
 	return 0;
 }
 
+/* Generic goal-metric provider for PA */
+static unsigned long damon_pa_get_goal_metric(struct damon_ctx *ctx,
+		struct damos *scheme,
+		const struct damos_quota_goal *goal)
+{
+	switch (goal ? goal->metric : -1) {
+	case DAMOS_QUOTA_NODE_SYS_BP:
+		return damon_pa_get_node_sys_bp(ctx, scheme, goal);
+	default:
+		return 0;
+	}
+}
+
 static int damon_pa_scheme_score(struct damon_ctx *context,
 		struct damon_target *t, struct damon_region *r,
 		struct damos *scheme)
@@ -378,6 +435,7 @@ static int __init damon_pa_initcall(void)
 	.cleanup = NULL,
 	.apply_scheme = damon_pa_apply_scheme,
 	.get_scheme_score = damon_pa_scheme_score,
+	.get_goal_metric = damon_pa_get_goal_metric,
 	};
 
 	return damon_register_ops(&ops);
-- 
2.43.0
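
Note (not part of the patch): the core-side consumer of the new callback is
not included in this diff.  Below is a minimal sketch of how the core could
consult the ops when refreshing a quota goal's current value.  The helper
name damos_refresh_goal_from_ops() and its placement are illustrative
assumptions; only ctx->ops.get_goal_metric and its signature come from this
patch.

/*
 * Sketch only: ask the registered ops for a goal metric the core cannot
 * compute generically.  Ops sets that do not implement the optional
 * callback (e.g. vaddr) leave the goal's current value untouched.
 */
static void damos_refresh_goal_from_ops(struct damon_ctx *ctx,
		struct damos *s, struct damos_quota_goal *goal)
{
	if (goal->metric != DAMOS_QUOTA_NODE_SYS_BP)
		return;
	if (!ctx->ops.get_goal_metric)
		return;
	goal->current_value = ctx->ops.get_goal_metric(ctx, s, goal);
}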