The eval_map_work_func() function, although it runs from eval_map_wq, holds
the trace_event_sem read-write semaphore for a long time during kernel boot,
blocking other boot-time initialization that needs that lock. Rename
eval_map_wq to trace_init_wq and export it via trace.h, so that other parts
of the tracing code can queue their own slow initialization on this
workqueue asynchronously instead of blocking the main boot thread.

Signed-off-by: Yaxiong Tian
---
 kernel/trace/trace.c | 18 +++++++++---------
 kernel/trace/trace.h |  2 ++
 2 files changed, 11 insertions(+), 9 deletions(-)

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index e18005807395..c61e30cb7339 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -10774,7 +10774,7 @@ int tracing_init_dentry(void)
 extern struct trace_eval_map *__start_ftrace_eval_maps[];
 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
 
-static struct workqueue_struct *eval_map_wq __initdata;
+struct workqueue_struct *trace_init_wq __initdata;
 static struct work_struct eval_map_work __initdata;
 static struct work_struct tracerfs_init_work __initdata;
 
@@ -10790,15 +10790,15 @@ static int __init trace_eval_init(void)
 {
 	INIT_WORK(&eval_map_work, eval_map_work_func);
 
-	eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
-	if (!eval_map_wq) {
-		pr_err("Unable to allocate eval_map_wq\n");
+	trace_init_wq = alloc_workqueue("trace_init_wq", WQ_UNBOUND, 0);
+	if (!trace_init_wq) {
+		pr_err("Unable to allocate trace_init_wq\n");
 		/* Do work here */
 		eval_map_work_func(&eval_map_work);
 		return -ENOMEM;
 	}
 
-	queue_work(eval_map_wq, &eval_map_work);
+	queue_work(trace_init_wq, &eval_map_work);
 
 	return 0;
 }
@@ -10807,8 +10807,8 @@ subsys_initcall(trace_eval_init);
 static int __init trace_eval_sync(void)
 {
 	/* Make sure the eval map updates are finished */
-	if (eval_map_wq)
-		destroy_workqueue(eval_map_wq);
+	if (trace_init_wq)
+		destroy_workqueue(trace_init_wq);
 	return 0;
 }
 
@@ -10969,9 +10969,9 @@ static __init int tracer_init_tracefs(void)
 	if (ret)
 		return 0;
 
-	if (eval_map_wq) {
+	if (trace_init_wq) {
 		INIT_WORK(&tracerfs_init_work, tracer_init_tracefs_work_func);
-		queue_work(eval_map_wq, &tracerfs_init_work);
+		queue_work(trace_init_wq, &tracerfs_init_work);
 	} else {
 		tracer_init_tracefs_work_func(NULL);
 	}
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index de4e6713b84e..e52f259f8945 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -770,6 +770,8 @@ extern unsigned long nsecs_to_usecs(unsigned long nsecs);
 
 extern unsigned long tracing_thresh;
 
+extern struct workqueue_struct *trace_init_wq __initdata;
+
 /* PID filtering */
 
 bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
-- 
2.25.1
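
The two follow-up patches apply the same deferral pattern: queue the slow,
lock-taking initialization on trace_init_wq when the workqueue was
allocated, and fall back to running it synchronously when alloc_workqueue()
failed. A minimal sketch of that pattern follows; the my_* names are
placeholders for illustration only, not symbols from these patches:

#include <linux/init.h>
#include <linux/workqueue.h>

static struct work_struct my_init_work __initdata;

static void __init my_slow_init(void)
{
	/* Stand-in for the slow part, e.g. setup_boot_kprobe_events()
	 * in patch 2, which ends up waiting on trace_event_sem. */
}

static void __init my_init_work_func(struct work_struct *work)
{
	my_slow_init();
}

static int __init my_initcall(void)
{
	if (trace_init_wq) {
		/* Defer to an unbound worker, off the main boot thread. */
		INIT_WORK(&my_init_work, my_init_work_func);
		queue_work(trace_init_wq, &my_init_work);
	} else {
		/* trace_init_wq allocation failed: run synchronously. */
		my_slow_init();
	}
	return 0;
}
device_initcall(my_initcall);

The fallback branch keeps behavior identical to today's code whenever the
workqueue could not be created, so the deferral is purely an optimization.
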
During kernel boot, setup_boot_kprobe_events() causes significant delays and
increases overall startup time. The root cause is a lock contention chain:
its callee enable_boot_kprobe_events() needs event_mutex, which is already
held by early_event_add_tracer(); early_event_add_tracer() is itself blocked
waiting for the trace_event_sem read-write semaphore, which
trace_event_update_all() holds for an extended period. Resolve this by
moving setup_boot_kprobe_events() onto the trace_init_wq workqueue so that
it runs asynchronously instead of stalling the boot thread.

Signed-off-by: Yaxiong Tian
---
 kernel/trace/trace_kprobe.c | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 9953506370a5..4c6621f02696 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -2031,6 +2031,13 @@ static __init int init_kprobe_trace_early(void)
 }
 core_initcall(init_kprobe_trace_early);
 
+static struct work_struct kprobe_trace_work __initdata;
+
+static void __init kprobe_trace_works_func(struct work_struct *work)
+{
+	setup_boot_kprobe_events();
+}
+
 /* Make a tracefs interface for controlling probe points */
 static __init int init_kprobe_trace(void)
 {
@@ -2048,7 +2055,12 @@ static __init int init_kprobe_trace(void)
 	trace_create_file("kprobe_profile", TRACE_MODE_READ,
 			  NULL, NULL, &kprobe_profile_ops);
 
-	setup_boot_kprobe_events();
+	if (trace_init_wq) {
+		INIT_WORK(&kprobe_trace_work, kprobe_trace_works_func);
+		queue_work(trace_init_wq, &kprobe_trace_work);
+	} else {
+		setup_boot_kprobe_events();
+	}
 
 	return 0;
 }
-- 
2.25.1

The init_blk_tracer() function adds significant boot delay because it waits
for the trace_event_sem lock held by trace_event_update_all(). Specifically,
its callee register_trace_event() needs this lock, which is held for an
extended period during boot. Mitigate the contention by moving the body of
init_blk_tracer() onto the trace_init_wq workqueue, so it executes
asynchronously and no longer blocks the main boot thread.

Signed-off-by: Yaxiong Tian
---
 kernel/trace/blktrace.c | 23 ++++++++++++++++++++++-
 1 file changed, 22 insertions(+), 1 deletion(-)

diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index d031c8d80be4..d611cd1f02ef 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -1832,7 +1832,9 @@ static struct trace_event trace_blk_event = {
 	.funcs		= &trace_blk_event_funcs,
 };
 
-static int __init init_blk_tracer(void)
+static struct work_struct blktrace_works __initdata;
+
+static int __init __init_blk_tracer(void)
 {
 	if (!register_trace_event(&trace_blk_event)) {
 		pr_warn("Warning: could not register block events\n");
@@ -1852,6 +1854,25 @@ static int __init init_blk_tracer(void)
 	return 0;
 }
 
+static void __init blktrace_works_func(struct work_struct *work)
+{
+	__init_blk_tracer();
+}
+
+static int __init init_blk_tracer(void)
+{
+	int ret = 0;
+
+	if (trace_init_wq) {
+		INIT_WORK(&blktrace_works, blktrace_works_func);
+		queue_work(trace_init_wq, &blktrace_works);
+	} else {
+		ret = __init_blk_tracer();
+	}
+
+	return ret;
+}
+
 device_initcall(init_blk_tracer);
 
 static int blk_trace_remove_queue(struct request_queue *q)
-- 
2.25.1
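
One property worth noting for all three patches: the work items and work
functions are __initdata/__init, so every deferred item must finish before
init memory is freed. Patch 1 preserves that guarantee through the existing
trace_eval_sync() initcall, because destroy_workqueue() waits for all
pending work to complete before it returns. A sketch of that synchronization
point, reproduced from patch 1 with explanatory comments added here (the
comments, and the assumption that trace_eval_sync() stays registered via
late_initcall_sync() as in current mainline, are ours):

static int __init trace_eval_sync(void)
{
	/*
	 * destroy_workqueue() drains everything still queued on
	 * trace_init_wq -- the eval map update, the kprobe boot events
	 * and the blktrace registration -- so no deferred __init code
	 * can run once this returns.
	 */
	if (trace_init_wq)
		destroy_workqueue(trace_init_wq);
	return 0;
}

late_initcall_sync(trace_eval_sync);

Because this runs after the earlier-level initcalls that queue the kprobe
and blktrace work above, nothing extra is needed in patches 2 and 3 to keep
the __initdata usage safe.
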