Split out the code relaying a blktrace event to user-space using relayfs.
This enables adding a second version supporting a new version of the
protocol.

Reviewed-by: Christoph Hellwig
Signed-off-by: Johannes Thumshirn
---
 kernel/trace/blktrace.c | 60 ++++++++++++++++++++++-------------------
 1 file changed, 32 insertions(+), 28 deletions(-)

diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 25a0a1b09747..51745832c713 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -91,6 +91,26 @@ static void record_blktrace_event(struct blk_io_trace *t, pid_t pid, int cpu,
 	memcpy((void *)t + sizeof(*t) + cgid_len, pdu_data, pdu_len);
 }
 
+static void relay_blktrace_event(struct blk_trace *bt, unsigned long sequence,
+				 pid_t pid, int cpu, sector_t sector, int bytes,
+				 u32 what, int error, u64 cgid,
+				 ssize_t cgid_len, void *pdu_data, int pdu_len)
+{
+	struct blk_io_trace *t;
+	size_t trace_len = sizeof(*t) + pdu_len + cgid_len;
+
+	t = relay_reserve(bt->rchan, trace_len);
+	if (!t)
+		return;
+
+	t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
+	t->sequence = sequence;
+	t->time = ktime_to_ns(ktime_get());
+
+	record_blktrace_event(t, pid, cpu, sector, bytes, what, bt->dev, error,
+			      cgid, cgid_len, pdu_data, pdu_len);
+}
+
 /*
  * Send out a notify message.
  */
@@ -126,16 +146,9 @@ static void trace_note(struct blk_trace *bt, pid_t pid, int action,
 	if (!bt->rchan)
 		return;
 
-	t = relay_reserve(bt->rchan, trace_len);
-	if (t) {
-		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
-		t->time = ktime_to_ns(ktime_get());
-
-		record_blktrace_event(t, pid, cpu, 0, 0,
-				      action | (cgid ? __BLK_TN_CGROUP : 0),
-				      bt->dev, 0, cgid, cgid_len, (void *)data,
-				      len);
-	}
+	relay_blktrace_event(bt, 0, pid, cpu, 0, 0,
+			     action | (cgid ? __BLK_TN_CGROUP : 0), 0, cgid,
+			     cgid_len, (void *)data, len);
 }
 
 /*
@@ -246,7 +259,6 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 	struct task_struct *tsk = current;
 	struct ring_buffer_event *event = NULL;
 	struct trace_buffer *buffer = NULL;
-	struct blk_io_trace *t;
 	unsigned long flags = 0;
 	unsigned long *sequence;
 	unsigned int trace_ctx = 0;
@@ -278,20 +290,21 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 		return;
 	cpu = raw_smp_processor_id();
 
-	trace_len = sizeof(*t) + pdu_len + cgid_len;
 	if (blk_tracer) {
 		tracing_record_cmdline(current);
 
 		buffer = blk_tr->array_buffer.buffer;
 		trace_ctx = tracing_gen_ctx_flags(0);
+		trace_len = sizeof(struct blk_io_trace) + pdu_len + cgid_len;
 		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
 						  trace_len, trace_ctx);
 		if (!event)
 			return;
-		t = ring_buffer_event_data(event);
-		record_blktrace_event(t, pid, cpu, sector, bytes, what, bt->dev,
-				      error, cgid, cgid_len, pdu_data, pdu_len);
+		record_blktrace_event(ring_buffer_event_data(event),
+				      pid, cpu, sector, bytes, what, bt->dev,
+				      error, cgid, cgid_len, pdu_data,
+				      pdu_len);
 		trace_buffer_unlock_commit(blk_tr, buffer, event, trace_ctx);
 		return;
 	}
 
@@ -306,19 +319,10 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 	 * from coming in and stepping on our toes.
 	 */
 	local_irq_save(flags);
-	t = relay_reserve(bt->rchan, trace_len);
-	if (t) {
-		sequence = per_cpu_ptr(bt->sequence, cpu);
-
-		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
-		t->sequence = ++(*sequence);
-		t->time = ktime_to_ns(ktime_get());
-
-		record_blktrace_event(t, pid, cpu, sector, bytes, what,
-				      bt->dev, error, cgid, cgid_len,
-				      pdu_data, pdu_len);
-	}
-
+	sequence = per_cpu_ptr(bt->sequence, cpu);
+	(*sequence)++;
+	relay_blktrace_event(bt, *sequence, pid, cpu, sector, bytes, what,
+			     error, cgid, cgid_len, pdu_data, pdu_len);
 	local_irq_restore(flags);
 }
 
-- 
2.51.0
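
For reference, the "second version" the commit message anticipates could now be added alongside the split-out helper without touching the callers' locking or the ftrace path. The sketch below is purely illustrative: struct blk_io_trace_v2, BLK_IO_TRACE_VERSION2 and record_blktrace_event_v2() are made-up names that are not part of this patch or of the current UAPI; the sketch only shows how the reserve-and-fill sequence factored into relay_blktrace_event() could be repeated for a new record format.

static void relay_blktrace_event_v2(struct blk_trace *bt,
				    unsigned long sequence, pid_t pid, int cpu,
				    sector_t sector, int bytes, u32 what,
				    int error, u64 cgid, ssize_t cgid_len,
				    void *pdu_data, int pdu_len)
{
	/* Hypothetical v2 record layout; not a real kernel/UAPI struct. */
	struct blk_io_trace_v2 *t;
	size_t trace_len = sizeof(*t) + pdu_len + cgid_len;

	/* Same per-CPU relay reservation as the v1 helper. */
	t = relay_reserve(bt->rchan, trace_len);
	if (!t)
		return;

	/* Only the header differs: a new version bit plus any new fields. */
	t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION2;
	t->sequence = sequence;
	t->time = ktime_to_ns(ktime_get());

	/* Payload encoding would live in a matching (hypothetical) helper. */
	record_blktrace_event_v2(t, pid, cpu, sector, bytes, what, bt->dev,
				 error, cgid, cgid_len, pdu_data, pdu_len);
}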