In production, show_mem() can be called concurrently from two different
entities, for example one from oom_kill_process() and another from
__alloc_pages_slowpath() running in another kthread. This patch adds a
mutex and invokes mutex_trylock() before printing out the kernel
allocation info in show_mem(). This way two allocation-info dumps cannot
interleave with each other, which makes parsing easier. Note that since
trylock is used, a caller that fails to acquire the mutex skips printing
the allocation info instead of blocking.

Signed-off-by: Yueyang Pan
---
 mm/show_mem.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/mm/show_mem.c b/mm/show_mem.c
index b71e222fde86..8814b5f8a7dc 100644
--- a/mm/show_mem.c
+++ b/mm/show_mem.c
@@ -23,6 +23,8 @@ EXPORT_SYMBOL(_totalram_pages);
 unsigned long totalreserve_pages __read_mostly;
 unsigned long totalcma_pages __read_mostly;
 
+static DEFINE_MUTEX(mem_alloc_profiling_mutex);
+
 static inline void show_node(struct zone *zone)
 {
 	if (IS_ENABLED(CONFIG_NUMA))
@@ -419,7 +421,7 @@ void __show_mem(unsigned int filter, nodemask_t *nodemask, int max_zone_idx)
 	printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
 #endif
 #ifdef CONFIG_MEM_ALLOC_PROFILING
-	if (mem_alloc_profiling_enabled()) {
+	if (mem_alloc_profiling_enabled() && mutex_trylock(&mem_alloc_profiling_mutex)) {
 		struct codetag_bytes tags[10];
 		size_t i, nr;
 
@@ -445,6 +447,7 @@ void __show_mem(unsigned int filter, nodemask_t *nodemask, int max_zone_idx)
 				ct->lineno, ct->function);
 			}
 		}
+		mutex_unlock(&mem_alloc_profiling_mutex);
 	}
 #endif
 }
-- 
2.47.3