This patch is a result of our long-standing debug sessions, where it all started as "networking is slow", and TCP network throughput suddenly dropped from tens of Gbps to a few Mbps, and we could not see anything in the kernel log or netstat counters. Currently, we have two memory pressure counters for TCP sockets [1], which we manipulate only when the memory pressure is signaled through the proto struct [2]. However, the memory pressure can also be signaled through the cgroup memory subsystem, which we do not reflect in the netstat counters. In the end, when the cgroup memory subsystem signals that it is under pressure, we silently reduce the advertised TCP window with tcp_adjust_rcv_ssthresh() to 4*advmss, which causes a significant throughput reduction. So this patch adds a new counter to account for memory pressure signaled by the memory cgroup, so it is much easier to spot. Link: https://elixir.bootlin.com/linux/v6.15.4/source/include/uapi/linux/snmp.h#L231-L232 [1] Link: https://elixir.bootlin.com/linux/v6.15.4/source/include/net/sock.h#L1300-L1301 [2] Co-developed-by: Matyas Hurtik Signed-off-by: Matyas Hurtik Signed-off-by: Daniel Sedlak --- Documentation/networking/net_cachelines/snmp.rst | 1 + include/net/tcp.h | 14 ++++++++------ include/uapi/linux/snmp.h | 1 + net/ipv4/proc.c | 1 + 4 files changed, 11 insertions(+), 6 deletions(-) diff --git a/Documentation/networking/net_cachelines/snmp.rst b/Documentation/networking/net_cachelines/snmp.rst index bd44b3eebbef..ed17ff84e39c 100644 --- a/Documentation/networking/net_cachelines/snmp.rst +++ b/Documentation/networking/net_cachelines/snmp.rst @@ -76,6 +76,7 @@ unsigned_long LINUX_MIB_TCPABORTONLINGER unsigned_long LINUX_MIB_TCPABORTFAILED unsigned_long LINUX_MIB_TCPMEMORYPRESSURES unsigned_long LINUX_MIB_TCPMEMORYPRESSURESCHRONO +unsigned_long LINUX_MIB_TCPCGROUPSOCKETPRESSURE unsigned_long LINUX_MIB_TCPSACKDISCARD unsigned_long LINUX_MIB_TCPDSACKIGNOREDOLD unsigned_long LINUX_MIB_TCPDSACKIGNOREDNOUNDO diff
--git a/include/net/tcp.h b/include/net/tcp.h index 761c4a0ad386..aae3efe24282 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -267,6 +267,11 @@ extern long sysctl_tcp_mem[3]; #define TCP_RACK_STATIC_REO_WND 0x2 /* Use static RACK reo wnd */ #define TCP_RACK_NO_DUPTHRESH 0x4 /* Do not use DUPACK threshold in RACK */ +#define TCP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.tcp_statistics, field) +#define __TCP_INC_STATS(net, field) __SNMP_INC_STATS((net)->mib.tcp_statistics, field) +#define TCP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->mib.tcp_statistics, field) +#define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val) + extern atomic_long_t tcp_memory_allocated; DECLARE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc); @@ -277,8 +282,10 @@ extern unsigned long tcp_memory_pressure; static inline bool tcp_under_memory_pressure(const struct sock *sk) { if (mem_cgroup_sockets_enabled && sk->sk_memcg && - mem_cgroup_under_socket_pressure(sk->sk_memcg)) + mem_cgroup_under_socket_pressure(sk->sk_memcg)) { + TCP_INC_STATS(sock_net(sk), LINUX_MIB_TCPCGROUPSOCKETPRESSURE); return true; + } return READ_ONCE(tcp_memory_pressure); } @@ -316,11 +323,6 @@ bool tcp_check_oom(const struct sock *sk, int shift); extern struct proto tcp_prot; -#define TCP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.tcp_statistics, field) -#define __TCP_INC_STATS(net, field) __SNMP_INC_STATS((net)->mib.tcp_statistics, field) -#define TCP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->mib.tcp_statistics, field) -#define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val) - void tcp_tsq_work_init(void); int tcp_v4_err(struct sk_buff *skb, u32); diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index 1d234d7e1892..9e8d1a5e56a9 100644 --- a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h @@ -231,6 +231,7 @@ enum LINUX_MIB_TCPABORTFAILED, /* TCPAbortFailed */ LINUX_MIB_TCPMEMORYPRESSURES, /* 
TCPMemoryPressures */ LINUX_MIB_TCPMEMORYPRESSURESCHRONO, /* TCPMemoryPressuresChrono */ + LINUX_MIB_TCPCGROUPSOCKETPRESSURE, /* TCPCgroupSocketPressure */ LINUX_MIB_TCPSACKDISCARD, /* TCPSACKDiscard */ LINUX_MIB_TCPDSACKIGNOREDOLD, /* TCPSACKIgnoredOld */ LINUX_MIB_TCPDSACKIGNOREDNOUNDO, /* TCPSACKIgnoredNoUndo */ diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index ea2f01584379..0bcec9a51fb0 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -235,6 +235,7 @@ static const struct snmp_mib snmp4_net_list[] = { SNMP_MIB_ITEM("TCPAbortFailed", LINUX_MIB_TCPABORTFAILED), SNMP_MIB_ITEM("TCPMemoryPressures", LINUX_MIB_TCPMEMORYPRESSURES), SNMP_MIB_ITEM("TCPMemoryPressuresChrono", LINUX_MIB_TCPMEMORYPRESSURESCHRONO), + SNMP_MIB_ITEM("TCPCgroupSocketPressure", LINUX_MIB_TCPCGROUPSOCKETPRESSURE), SNMP_MIB_ITEM("TCPSACKDiscard", LINUX_MIB_TCPSACKDISCARD), SNMP_MIB_ITEM("TCPDSACKIgnoredOld", LINUX_MIB_TCPDSACKIGNOREDOLD), SNMP_MIB_ITEM("TCPDSACKIgnoredNoUndo", LINUX_MIB_TCPDSACKIGNOREDNOUNDO), -- 2.39.5 From: Matyas Hurtik When the vmpressure function marks all sockets within a particular cgroup as under pressure, it can silently reduce network throughput significantly. This socket pressure is not currently signaled in any way to the users, and it is difficult to detect which cgroup is under socket pressure. This patch adds a new tracepoint that is called when a cgroup is under socket pressure. 
Signed-off-by: Matyas Hurtik Co-developed-by: Daniel Sedlak Signed-off-by: Daniel Sedlak --- include/trace/events/memcg.h | 25 +++++++++++++++++++++++++ mm/vmpressure.c | 3 +++ 2 files changed, 28 insertions(+) diff --git a/include/trace/events/memcg.h b/include/trace/events/memcg.h index dfe2f51019b4..19a51db73913 100644 --- a/include/trace/events/memcg.h +++ b/include/trace/events/memcg.h @@ -100,6 +100,31 @@ TRACE_EVENT(memcg_flush_stats, __entry->force, __entry->needs_flush) ); +TRACE_EVENT(memcg_socket_under_pressure, + + TP_PROTO(const struct mem_cgroup *memcg, unsigned long scanned, + unsigned long reclaimed), + + TP_ARGS(memcg, scanned, reclaimed), + + TP_STRUCT__entry( + __field(u64, id) + __field(unsigned long, scanned) + __field(unsigned long, reclaimed) + ), + + TP_fast_assign( + __entry->id = cgroup_id(memcg->css.cgroup); + __entry->scanned = scanned; + __entry->reclaimed = reclaimed; + ), + + TP_printk("memcg_id=%llu scanned=%lu reclaimed=%lu", + __entry->id, + __entry->scanned, + __entry->reclaimed) +); + #endif /* _TRACE_MEMCG_H */ /* This part must be outside protection */ diff --git a/mm/vmpressure.c b/mm/vmpressure.c index bd5183dfd879..aa9583066731 100644 --- a/mm/vmpressure.c +++ b/mm/vmpressure.c @@ -21,6 +21,8 @@ #include #include +#include + /* * The window size (vmpressure_win) is the number of scanned pages before * we try to analyze scanned/reclaimed ratio. So the window is used as a @@ -317,6 +319,7 @@ void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, * pressure events can occur. */ WRITE_ONCE(memcg->socket_pressure, jiffies + HZ); + trace_memcg_socket_under_pressure(memcg, scanned, reclaimed); } } } -- 2.39.5