In production, show_mem() can be called concurrently from two
different entities, for example one from oom_kill_process() and
another from __alloc_pages_slowpath() in another kthread. This patch
adds a spinlock and takes it with spin_trylock() before printing the
allocation-profiling info in show_mem(), so that two reports cannot
interleave with each other, which makes them easier to parse. A caller
that fails to acquire the lock skips printing this section instead of
blocking.

Signed-off-by: Yueyang Pan
---
 mm/show_mem.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/mm/show_mem.c b/mm/show_mem.c
index 51892ce2efc4..4c876ea2b66f 100644
--- a/mm/show_mem.c
+++ b/mm/show_mem.c
@@ -396,6 +396,7 @@ static void show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_z
 
 void __show_mem(unsigned int filter, nodemask_t *nodemask, int max_zone_idx)
 {
+	static DEFINE_SPINLOCK(mem_alloc_profiling_spinlock);
 	unsigned long total = 0, reserved = 0, highmem = 0;
 	struct zone *zone;
 
@@ -421,7 +422,7 @@ void __show_mem(unsigned int filter, nodemask_t *nodemask, int max_zone_idx)
 	printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
 #endif
 #ifdef CONFIG_MEM_ALLOC_PROFILING
-	{
+	if (spin_trylock(&mem_alloc_profiling_spinlock)) {
 		struct codetag_bytes tags[10];
 		size_t i, nr;
 
@@ -449,6 +450,7 @@ void __show_mem(unsigned int filter, nodemask_t *nodemask, int max_zone_idx)
 				ct->lineno, ct->function);
 			}
 		}
+		spin_unlock(&mem_alloc_profiling_spinlock);
 	}
 #endif
 }
-- 
2.47.3
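
(An illustrative aside, not part of the patch: the trylock-and-skip
pattern above can be sketched as a self-contained userspace analogue
using plain pthreads; all names below are hypothetical. The thread
that loses the race drops its report instead of interleaving output.)

	/* Build with: cc demo.c -o demo -lpthread */
	#include <pthread.h>
	#include <stdio.h>

	static pthread_spinlock_t report_lock;

	static void *print_report(void *arg)
	{
		const char *who = arg;

		/* Mirrors spin_trylock() in __show_mem(): only the winner prints. */
		if (pthread_spin_trylock(&report_lock) == 0) {
			printf("%s: report line 1\n", who);
			printf("%s: report line 2\n", who);
			pthread_spin_unlock(&report_lock);
		}
		/* On contention the report is skipped, never interleaved. */
		return NULL;
	}

	int main(void)
	{
		pthread_t a, b;

		pthread_spin_init(&report_lock, PTHREAD_PROCESS_PRIVATE);
		pthread_create(&a, NULL, print_report, "A");
		pthread_create(&b, NULL, print_report, "B");
		pthread_join(a, NULL);
		pthread_join(b, NULL);
		pthread_spin_destroy(&report_lock);
		return 0;
	}

The patch likely makes the same trade-off: show_mem() can be reached
from atomic contexts such as the OOM path, where waiting on a
contended lock is undesirable, so dropping one duplicate dump is
preferable to blocking or interleaving.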