On systems with tiered memory, there is currently no tracking of memory
at the tier-memcg granularity. While per-memcg-lruvec serves at a finer
granularity that can be accumulated to give us the desired
per-tier-memcg accounting, relying on these lruvec stats for limit
checking can touch too many hot paths too frequently and can introduce
increased latency for other memcg users.

Instead, add a new cacheline in struct page_counter to track toptier
memcg limits and usage, as well as cached capacity values. This
cacheline is only used by the mem_cgroup->memory page_counter. Also,
introduce helpers that use these new fields to calculate proportional
toptier high and low values, based on the system's toptier:total
capacity ratio.

Signed-off-by: Joshua Hahn <joshua.hahnjy@gmail.com>
---
 include/linux/page_counter.h | 22 +++++++++++++++++++++-
 mm/page_counter.c            | 34 ++++++++++++++++++++++++++++++++++
 2 files changed, 55 insertions(+), 1 deletion(-)

diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h
index d649b6bbbc87..128c1272c88c 100644
--- a/include/linux/page_counter.h
+++ b/include/linux/page_counter.h
@@ -5,6 +5,7 @@
 #include <linux/atomic.h>
 #include <linux/cache.h>
 #include <linux/limits.h>
+#include <linux/memory-tiers.h>
 #include <asm/page.h>
 
 struct page_counter {
@@ -31,9 +32,23 @@ struct page_counter {
 	/* Latest cg2 reset watermark */
 	unsigned long local_watermark;
 
-	/* Keep all the read most fields in a separete cacheline. */
+	/* Keep all the tiered memory fields in a separate cacheline. */
 	CACHELINE_PADDING(_pad2_);
 
+	atomic_long_t toptier_usage;
+
+	/* effective toptier-proportional low protection */
+	unsigned long etoptier_low;
+	atomic_long_t toptier_low_usage;
+	atomic_long_t children_toptier_low_usage;
+
+	/* Cached toptier capacity for proportional limit calculations */
+	unsigned long toptier_capacity;
+	unsigned long total_capacity;
+
+	/* Keep all the read most fields in a separate cacheline. */
+	CACHELINE_PADDING(_pad3_);
+
 	bool protection_support;
 	bool track_failcnt;
 	unsigned long min;
@@ -61,6 +76,9 @@ static inline void page_counter_init(struct page_counter *counter,
 	counter->parent = parent;
 	counter->protection_support = protection_support;
 	counter->track_failcnt = false;
+	counter->toptier_usage = (atomic_long_t)ATOMIC_LONG_INIT(0);
+	counter->toptier_capacity = 0;
+	counter->total_capacity = 0;
 }
 
 static inline unsigned long page_counter_read(struct page_counter *counter)
@@ -103,6 +121,8 @@ static inline void page_counter_reset_watermark(struct page_counter *counter)
 void page_counter_calculate_protection(struct page_counter *root,
 				       struct page_counter *counter,
 				       bool recursive_protection);
+unsigned long page_counter_toptier_high(struct page_counter *counter);
+unsigned long page_counter_toptier_low(struct page_counter *counter);
 #else
 static inline void page_counter_calculate_protection(struct page_counter *root,
 						     struct page_counter *counter,
diff --git a/mm/page_counter.c b/mm/page_counter.c
index 661e0f2a5127..5ec97811c418 100644
--- a/mm/page_counter.c
+++ b/mm/page_counter.c
@@ -462,4 +462,38 @@ void page_counter_calculate_protection(struct page_counter *root,
 			atomic_long_read(&parent->children_low_usage),
 			recursive_protection));
 }
+
+unsigned long page_counter_toptier_high(struct page_counter *counter)
+{
+	unsigned long high = READ_ONCE(counter->high);
+	unsigned long toptier_cap, total_cap;
+
+	if (high == PAGE_COUNTER_MAX)
+		return PAGE_COUNTER_MAX;
+
+	toptier_cap = counter->toptier_capacity;
+	total_cap = counter->total_capacity;
+
+	if (!total_cap)
+		return PAGE_COUNTER_MAX;
+
+	return mult_frac(high, toptier_cap, total_cap);
+}
+
+unsigned long page_counter_toptier_low(struct page_counter *counter)
+{
+	unsigned long low = READ_ONCE(counter->low);
+	unsigned long toptier_cap, total_cap;
+
+	if (!low)
+		return 0;
+
+	toptier_cap = counter->toptier_capacity;
+	total_cap = counter->total_capacity;
+
+	if (!total_cap)
+		return 0;
+
+	return mult_frac(low, toptier_cap, total_cap);
+}
 #endif /* CONFIG_MEMCG || CONFIG_CGROUP_DMEM */
-- 
2.47.3