// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/blk-cgroup.h>

#include "../../block/blk.h"

#include <trace/events/block.h>

#include "trace_output.h"

#ifdef CONFIG_BLK_DEV_IO_TRACE

static unsigned int blktrace_seq __read_mostly = 1;

static struct trace_array *blk_tr;
static bool blk_tracer_enabled __read_mostly;

static LIST_HEAD(running_trace_list);
static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(running_trace_lock);

/* Select an alternative, minimalistic output rather than the original one */
#define TRACE_BLK_OPT_CLASSIC	0x1
#define TRACE_BLK_OPT_CGROUP	0x2
#define TRACE_BLK_OPT_CGNAME	0x4

static struct tracer_opt blk_tracer_opts[] = {
	/* Default: disable the minimalistic output */
	{ TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
#ifdef CONFIG_BLK_CGROUP
	{ TRACER_OPT(blk_cgroup, TRACE_BLK_OPT_CGROUP) },
	{ TRACER_OPT(blk_cgname, TRACE_BLK_OPT_CGNAME) },
#endif
	{ }
};

static struct tracer_flags blk_tracer_flags = {
	.val  = 0,
	.opts = blk_tracer_opts,
};

/* Global reference count of probes */
static DEFINE_MUTEX(blk_probe_mutex);
static int blk_probes_ref;

static void blk_register_tracepoints(void);
static void blk_unregister_tracepoints(void);

/*
 * Send out a notify message.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
		       const void *data, size_t len, u64 cgid)
{
	struct blk_io_trace *t;
	struct ring_buffer_event *event = NULL;
	struct trace_buffer *buffer = NULL;
	unsigned int trace_ctx = 0;
	int cpu = smp_processor_id();
	bool blk_tracer = blk_tracer_enabled;
	ssize_t cgid_len = cgid ? sizeof(cgid) : 0;

	if (blk_tracer) {
		buffer = blk_tr->array_buffer.buffer;
		trace_ctx = tracing_gen_ctx_flags(0);
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + len + cgid_len,
						  trace_ctx);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	if (!bt->rchan)
		return;

	t = relay_reserve(bt->rchan, sizeof(*t) + len + cgid_len);
	if (t) {
		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->time = ktime_to_ns(ktime_get());
record_it:
		t->device = bt->dev;
		t->action = action | (cgid ? __BLK_TN_CGROUP : 0);
		t->pid = pid;
		t->cpu = cpu;
		t->pdu_len = len + cgid_len;
		if (cgid_len)
			memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
		memcpy((void *)t + sizeof(*t) + cgid_len, data, len);

		if (blk_tracer)
			trace_buffer_unlock_commit(blk_tr, buffer, event, trace_ctx);
	}
}

/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started
 */
static void trace_note_tsk(struct task_struct *tsk)
{
	unsigned long flags;
	struct blk_trace *bt;

	tsk->btrace_seq = blktrace_seq;
	raw_spin_lock_irqsave(&running_trace_lock, flags);
	list_for_each_entry(bt, &running_trace_list, running_list) {
		trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
			   sizeof(tsk->comm), 0);
	}
	raw_spin_unlock_irqrestore(&running_trace_lock, flags);
}

static void trace_note_time(struct blk_trace *bt)
{
	struct timespec64 now;
	unsigned long flags;
	u32 words[2];

	/* need to check user space to see if this breaks in y2038 or y2106 */
	ktime_get_real_ts64(&now);
	words[0] = (u32)now.tv_sec;
	words[1] = now.tv_nsec;

	local_irq_save(flags);
	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words), 0);
	local_irq_restore(flags);
}

void __blk_trace_note_message(struct blk_trace *bt,
		struct cgroup_subsys_state *css, const char *fmt, ...)
{
	int n;
	va_list args;
	unsigned long flags;
	char *buf;
	u64 cgid = 0;

	if (unlikely(bt->trace_state != Blktrace_running &&
		     !blk_tracer_enabled))
		return;

	/*
	 * If the BLK_TC_NOTIFY action mask isn't set, don't send any note
	 * message to the trace.
	 */
	if (!(bt->act_mask & BLK_TC_NOTIFY))
		return;

	local_irq_save(flags);
	buf = this_cpu_ptr(bt->msg_data);
	va_start(args, fmt);
	n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
	va_end(args);

#ifdef CONFIG_BLK_CGROUP
	if (css && (blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
		cgid = cgroup_id(css->cgroup);
	else
		cgid = 1;
#endif
	trace_note(bt, current->pid, BLK_TN_MESSAGE, buf, n, cgid);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__blk_trace_note_message);

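/*
 * Event filter: returns 1 when the event should be skipped because it does
 * not match the action mask, falls outside the [start_lba, end_lba] range,
 * or comes from a pid other than the one being traced; returns 0 otherwise.
 */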
static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
			 pid_t pid)
{
	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
		return 1;
	if (sector && (sector < bt->start_lba || sector > bt->end_lba))
		return 1;
	if (bt->pid && pid != bt->pid)
		return 1;

	return 0;
}

/*
 * Data direction bit lookup
 */
static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
				 BLK_TC_ACT(BLK_TC_WRITE) };

#define BLK_TC_RAHEAD		BLK_TC_AHEAD
#define BLK_TC_PREFLUSH		BLK_TC_FLUSH

/* The ilog2() calls fall out because they're constant */
#define MASK_TC_BIT(rw, __name) ((__force u32)(rw & REQ_ ## __name) << \
	  (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))

/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
			    const blk_opf_t opf, u32 what, int error,
			    int pdu_len, void *pdu_data, u64 cgid)
{
	struct task_struct *tsk = current;
	struct ring_buffer_event *event = NULL;
	struct trace_buffer *buffer = NULL;
	struct blk_io_trace *t;
	unsigned long flags = 0;
	unsigned long *sequence;
	unsigned int trace_ctx = 0;
	pid_t pid;
	int cpu;
	bool blk_tracer = blk_tracer_enabled;
	ssize_t cgid_len = cgid ? sizeof(cgid) : 0;
	const enum req_op op = opf & REQ_OP_MASK;

	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
		return;

	what |= ddir_act[op_is_write(op) ? WRITE : READ];
	what |= MASK_TC_BIT(opf, SYNC);
	what |= MASK_TC_BIT(opf, RAHEAD);
	what |= MASK_TC_BIT(opf, META);
	what |= MASK_TC_BIT(opf, PREFLUSH);
	what |= MASK_TC_BIT(opf, FUA);
	if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)
		what |= BLK_TC_ACT(BLK_TC_DISCARD);
	if (op == REQ_OP_FLUSH)
		what |= BLK_TC_ACT(BLK_TC_FLUSH);
	if (cgid)
		what |= __BLK_TA_CGROUP;

	pid = tsk->pid;
	if (act_log_check(bt, what, sector, pid))
		return;
	cpu = raw_smp_processor_id();

	if (blk_tracer) {
		tracing_record_cmdline(current);

		buffer = blk_tr->array_buffer.buffer;
		trace_ctx = tracing_gen_ctx_flags(0);
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + pdu_len + cgid_len,
						  trace_ctx);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(tsk);

	/*
	 * A word about the locking here - we disable interrupts to reserve
	 * some space in the relay per-cpu buffer, to prevent an irq
	 * from coming in and stepping on our toes.
	 */
	local_irq_save(flags);
	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len + cgid_len);
	if (t) {
		sequence = per_cpu_ptr(bt->sequence, cpu);

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->sequence = ++(*sequence);
		t->time = ktime_to_ns(ktime_get());
record_it:
		/*
		 * These two are not needed in ftrace as they are in the
		 * generic trace_entry, filled by tracing_generic_entry_update,
		 * but for the trace_event->bin() synthesizer benefit we do it
		 * here too.
		 */
		t->cpu = cpu;
		t->pid = pid;

		t->sector = sector;
		t->bytes = bytes;
		t->action = what;
		t->device = bt->dev;
		t->error = error;
		t->pdu_len = pdu_len + cgid_len;

		if (cgid_len)
			memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
		if (pdu_len)
			memcpy((void *)t + sizeof(*t) + cgid_len, pdu_data, pdu_len);

		if (blk_tracer) {
			trace_buffer_unlock_commit(blk_tr, buffer, event, trace_ctx);
			return;
		}
	}

	local_irq_restore(flags);
}

static void blk_trace_free(struct request_queue *q, struct blk_trace *bt)
{
	relay_close(bt->rchan);

	/*
	 * If 'bt->dir' is not set, then both 'dropped' and 'msg' are created
	 * under 'q->debugfs_dir', thus lookup and remove them.
	 */
	if (!bt->dir) {
		debugfs_lookup_and_remove("dropped", q->debugfs_dir);
		debugfs_lookup_and_remove("msg", q->debugfs_dir);
	} else {
		debugfs_remove(bt->dir);
	}
	free_percpu(bt->sequence);
	free_percpu(bt->msg_data);
	kfree(bt);
}

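/*
 * The block tracepoints are registered once, when the first trace is set
 * up, and unregistered again when the last user goes away; blk_probes_ref
 * counts the users under blk_probe_mutex.
 */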
static void get_probe_ref(void)
{
	mutex_lock(&blk_probe_mutex);
	if (++blk_probes_ref == 1)
		blk_register_tracepoints();
	mutex_unlock(&blk_probe_mutex);
}

static void put_probe_ref(void)
{
	mutex_lock(&blk_probe_mutex);
	if (!--blk_probes_ref)
		blk_unregister_tracepoints();
	mutex_unlock(&blk_probe_mutex);
}

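/*
 * Start/stop flip the trace_state and keep running_trace_list (consumed by
 * trace_note_tsk() above) in sync; bumping blktrace_seq forces a fresh
 * BLK_TN_PROCESS note for each task seen after the (re)start.
 */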
static int blk_trace_start(struct blk_trace *bt)
{
	if (bt->trace_state != Blktrace_setup &&
	    bt->trace_state != Blktrace_stopped)
		return -EINVAL;

	blktrace_seq++;
	smp_mb();
	bt->trace_state = Blktrace_running;
	raw_spin_lock_irq(&running_trace_lock);
	list_add(&bt->running_list, &running_trace_list);
	raw_spin_unlock_irq(&running_trace_lock);
	trace_note_time(bt);

	return 0;
}

static int blk_trace_stop(struct blk_trace *bt)
{
	if (bt->trace_state != Blktrace_running)
		return -EINVAL;

	bt->trace_state = Blktrace_stopped;
	raw_spin_lock_irq(&running_trace_lock);
	list_del_init(&bt->running_list);
	raw_spin_unlock_irq(&running_trace_lock);
	relay_flush(bt->rchan);

	return 0;
}

static void blk_trace_cleanup(struct request_queue *q, struct blk_trace *bt)
{
	blk_trace_stop(bt);
	synchronize_rcu();
	blk_trace_free(q, bt);
	put_probe_ref();
}

static int __blk_trace_remove(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = rcu_replace_pointer(q->blk_trace, NULL,
				 lockdep_is_held(&q->debugfs_mutex));
	if (!bt)
		return -EINVAL;

	blk_trace_cleanup(q, bt);

	return 0;
}

int blk_trace_remove(struct request_queue *q)
{
	int ret;

	mutex_lock(&q->debugfs_mutex);
	ret = __blk_trace_remove(q);
	mutex_unlock(&q->debugfs_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_remove);

static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct blk_trace *bt = filp->private_data;
	size_t dropped = relay_stats(bt->rchan, RELAY_STATS_BUF_FULL);
	char buf[16];

	snprintf(buf, sizeof(buf), "%zu\n", dropped);

	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}

static const struct file_operations blk_dropped_fops = {
	.owner =	THIS_MODULE,
	.open =		simple_open,
	.read =		blk_dropped_read,
	.llseek =	default_llseek,
};

static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
				size_t count, loff_t *ppos)
{
	char *msg;
	struct blk_trace *bt;

	if (count >= BLK_TN_MAX_MSG)
		return -EINVAL;

	msg = memdup_user_nul(buffer, count);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	bt = filp->private_data;
	__blk_trace_note_message(bt, NULL, "%s", msg);
	kfree(msg);

	return count;
}

static const struct file_operations blk_msg_fops = {
	.owner =	THIS_MODULE,
	.open =		simple_open,
	.write =	blk_msg_write,
	.llseek =	noop_llseek,
};

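/*
 * relay invokes these hooks to create and remove the per-cpu "traceN"
 * sub-buffer files in debugfs.
 */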
static int blk_remove_buf_file_callback(struct dentry *dentry)
{
	debugfs_remove(dentry);

	return 0;
}

static struct dentry *blk_create_buf_file_callback(const char *filename,
						   struct dentry *parent,
						   umode_t mode,
						   struct rchan_buf *buf,
						   int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}

static const struct rchan_callbacks blk_relay_callbacks = {
	.create_buf_file	= blk_create_buf_file_callback,
	.remove_buf_file	= blk_remove_buf_file_callback,
};

static void blk_trace_setup_lba(struct blk_trace *bt,
				struct block_device *bdev)
{
	if (bdev) {
		bt->start_lba = bdev->bd_start_sect;
		bt->end_lba = bdev->bd_start_sect + bdev_nr_sectors(bdev);
	} else {
		bt->start_lba = 0;
		bt->end_lba = -1ULL;
	}
}

/*
 * Setup everything required to start tracing
 */
static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
			      struct block_device *bdev,
			      struct blk_user_trace_setup *buts)
{
	struct blk_trace *bt = NULL;
	struct dentry *dir = NULL;
	int ret;

	lockdep_assert_held(&q->debugfs_mutex);

	if (!buts->buf_size || !buts->buf_nr)
		return -EINVAL;

	strscpy_pad(buts->name, name, BLKTRACE_BDEV_SIZE);

	/*
	 * some device names have larger paths - convert the slashes
	 * to underscores for this to work as expected
	 */
	strreplace(buts->name, '/', '_');

	/*
	 * bdev can be NULL, as with scsi-generic; this is as helpful as
	 * we can be.
	 */
	if (rcu_dereference_protected(q->blk_trace,
				      lockdep_is_held(&q->debugfs_mutex))) {
		pr_warn("Concurrent blktraces are not allowed on %s\n",
			buts->name);
		return -EBUSY;
	}

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	ret = -ENOMEM;
	bt->sequence = alloc_percpu(unsigned long);
	if (!bt->sequence)
		goto err;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto err;

	/*
	 * When tracing the whole disk reuse the existing debugfs directory
	 * created by the block layer on init. For partitions and
	 * scsi-generic block devices we create a temporary new debugfs
	 * directory that will be removed once the trace ends.
	 */
	if (bdev && !bdev_is_partition(bdev))
		dir = q->debugfs_dir;
	else
		bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root);

	/*
	 * As blktrace relies on debugfs for its interface the debugfs directory
	 * is required, contrary to the usual mantra of not checking for debugfs
	 * files or directories.
	 */
	if (IS_ERR_OR_NULL(dir)) {
		pr_warn("debugfs_dir not present for %s so skipping\n",
			buts->name);
		ret = -ENOENT;
		goto err;
	}

	bt->dev = dev;
	INIT_LIST_HEAD(&bt->running_list);

	ret = -EIO;
	debugfs_create_file("dropped", 0444, dir, bt, &blk_dropped_fops);
	debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);

	bt->rchan = relay_open("trace", dir, buts->buf_size,
			       buts->buf_nr, &blk_relay_callbacks, bt);
	if (!bt->rchan)
		goto err;

	bt->act_mask = buts->act_mask;
	if (!bt->act_mask)
		bt->act_mask = (u16) -1;

	blk_trace_setup_lba(bt, bdev);

	/* overwrite with user settings */
	if (buts->start_lba)
		bt->start_lba = buts->start_lba;
	if (buts->end_lba)
		bt->end_lba = buts->end_lba;

	bt->pid = buts->pid;
	bt->trace_state = Blktrace_setup;

	rcu_assign_pointer(q->blk_trace, bt);
	get_probe_ref();

	ret = 0;
err:
	if (ret)
		blk_trace_free(q, bt);
	return ret;
}

int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		    struct block_device *bdev,
		    char __user *arg)
{
	struct blk_user_trace_setup buts;
	int ret;

	ret = copy_from_user(&buts, arg, sizeof(buts));
	if (ret)
		return -EFAULT;

	mutex_lock(&q->debugfs_mutex);
	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
	mutex_unlock(&q->debugfs_mutex);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts, sizeof(buts))) {
		blk_trace_remove(q);
		return -EFAULT;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_setup);

#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
static int compat_blk_trace_setup(struct request_queue *q, char *name,
				  dev_t dev, struct block_device *bdev,
				  char __user *arg)
{
	struct blk_user_trace_setup buts;
	struct compat_blk_user_trace_setup cbuts;
	int ret;

	if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
		return -EFAULT;

	buts = (struct blk_user_trace_setup) {
		.act_mask = cbuts.act_mask,
		.buf_size = cbuts.buf_size,
		.buf_nr = cbuts.buf_nr,
		.start_lba = cbuts.start_lba,
		.end_lba = cbuts.end_lba,
		.pid = cbuts.pid,
	};

	mutex_lock(&q->debugfs_mutex);
	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
	mutex_unlock(&q->debugfs_mutex);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
		blk_trace_remove(q);
		return -EFAULT;
	}

	return 0;
}
#endif

static int __blk_trace_startstop(struct request_queue *q, int start)
{
	struct blk_trace *bt;

	bt = rcu_dereference_protected(q->blk_trace,
				       lockdep_is_held(&q->debugfs_mutex));
	if (bt == NULL)
		return -EINVAL;

	if (start)
		return blk_trace_start(bt);
	else
		return blk_trace_stop(bt);
}

int blk_trace_startstop(struct request_queue *q, int start)
{
	int ret;

	mutex_lock(&q->debugfs_mutex);
	ret = __blk_trace_startstop(q, start);
	mutex_unlock(&q->debugfs_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_startstop);

/*
 * When reading or writing the blktrace sysfs files, the references to the
 * opened sysfs or device files should prevent the underlying block device
 * from being removed. So no further delete protection is really needed.
 */

/**
 * blk_trace_ioctl - handle the ioctls associated with tracing
 * @bdev:	the block device
 * @cmd:	the ioctl cmd
 * @arg:	the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
	struct request_queue *q = bdev_get_queue(bdev);
	int ret, start = 0;
	char b[BDEVNAME_SIZE];

	switch (cmd) {
	case BLKTRACESETUP:
		snprintf(b, sizeof(b), "%pg", bdev);
		ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
	case BLKTRACESETUP32:
		snprintf(b, sizeof(b), "%pg", bdev);
		ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
#endif
	case BLKTRACESTART:
		start = 1;
		fallthrough;
	case BLKTRACESTOP:
		ret = blk_trace_startstop(q, start);
		break;
	case BLKTRACETEARDOWN:
		ret = blk_trace_remove(q);
		break;
	default:
		ret = -ENOTTY;
		break;
	}
	return ret;
}

/**
 * blk_trace_shutdown - stop and cleanup trace structures
 * @q:    the request queue associated with the device
 *
 **/
void blk_trace_shutdown(struct request_queue *q)
{
	if (rcu_dereference_protected(q->blk_trace,
				      lockdep_is_held(&q->debugfs_mutex)))
		__blk_trace_remove(q);
}

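/*
 * Resolve the cgroup id to record with an event: only when CONFIG_BLK_CGROUP
 * and the blk_cgroup tracer option are enabled, taken from the blkcg css
 * associated with the bio (or the first bio of a request).
 */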
#ifdef CONFIG_BLK_CGROUP
static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
{
	struct cgroup_subsys_state *blkcg_css;
	struct blk_trace *bt;

	/* We don't use the 'bt' value here except as an optimization... */
	bt = rcu_dereference_protected(q->blk_trace, 1);
	if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
		return 0;

	blkcg_css = bio_blkcg_css(bio);
	if (!blkcg_css)
		return 0;
	return cgroup_id(blkcg_css->cgroup);
}
#else
static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
{
	return 0;
}
#endif

static u64
blk_trace_request_get_cgid(struct request *rq)
{
	if (!rq->bio)
		return 0;
	/* Use the first bio */
	return blk_trace_bio_get_cgid(rq->q, rq->bio);
}

/*
 * blktrace probes
 */

/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @rq:		the source request
 * @error:	return status to log
 * @nr_bytes:	number of completed bytes
 * @what:	the action
 * @cgid:	the cgroup info
 *
 * Description:
 *     Records an action against a request. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_rq(struct request *rq, blk_status_t error,
			     unsigned int nr_bytes, u32 what, u64 cgid)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(rq->q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	if (blk_rq_is_passthrough(rq))
		what |= BLK_TC_ACT(BLK_TC_PC);
	else
		what |= BLK_TC_ACT(BLK_TC_FS);

	__blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, rq->cmd_flags,
			what, blk_status_to_errno(error), 0, NULL, cgid);
	rcu_read_unlock();
}

static void blk_add_trace_rq_insert(void *ignore, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT,
			 blk_trace_request_get_cgid(rq));
}

static void blk_add_trace_rq_issue(void *ignore, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE,
			 blk_trace_request_get_cgid(rq));
}

static void blk_add_trace_rq_merge(void *ignore, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_BACKMERGE,
			 blk_trace_request_get_cgid(rq));
}

static void blk_add_trace_rq_requeue(void *ignore, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_REQUEUE,
			 blk_trace_request_get_cgid(rq));
}

static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
				      blk_status_t error, unsigned int nr_bytes)
{
	blk_add_trace_rq(rq, error, nr_bytes, BLK_TA_COMPLETE,
			 blk_trace_request_get_cgid(rq));
}

/**
 * blk_add_trace_bio - Add a trace for a bio oriented action
 * @q:		queue the io is for
 * @bio:	the source bio
 * @what:	the action
 * @error:	error, if any
 *
 * Description:
 *     Records an action against a bio. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
			      u32 what, int error)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
			bio->bi_opf, what, error, 0, NULL,
			blk_trace_bio_get_cgid(q, bio));
	rcu_read_unlock();
}

static void blk_add_trace_bio_complete(void *ignore,
				       struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE,
			  blk_status_to_errno(bio->bi_status));
}

static void blk_add_trace_bio_backmerge(void *ignore, struct bio *bio)
{
	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_BACKMERGE,
			  0);
}

static void blk_add_trace_bio_frontmerge(void *ignore, struct bio *bio)
{
	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_FRONTMERGE,
			  0);
}

static void blk_add_trace_bio_queue(void *ignore, struct bio *bio)
{
	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_QUEUE, 0);
}

static void blk_add_trace_getrq(void *ignore, struct bio *bio)
{
	blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_GETRQ, 0);
}

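/*
 * Plug events carry no payload; unplug and split events pass their one
 * interesting number (the count of queued requests, or the sector at which
 * the bio was split) as a big-endian pdu, decoded by get_pdu_int() below.
 */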
static void blk_add_trace_plug(void *ignore, struct request_queue *q)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (bt)
		__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, 0);
	rcu_read_unlock();
}

static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
				 unsigned int depth, bool explicit)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (bt) {
		__be64 rpdu = cpu_to_be64(depth);
		u32 what;

		if (explicit)
			what = BLK_TA_UNPLUG_IO;
		else
			what = BLK_TA_UNPLUG_TIMER;

		__blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, 0);
	}
	rcu_read_unlock();
}

static void blk_add_trace_split(void *ignore, struct bio *bio, unsigned int pdu)
{
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (bt) {
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, bio->bi_iter.bi_sector,
				bio->bi_iter.bi_size, bio->bi_opf, BLK_TA_SPLIT,
				blk_status_to_errno(bio->bi_status),
				sizeof(rpdu), &rpdu,
				blk_trace_bio_get_cgid(q, bio));
	}
	rcu_read_unlock();
}

/**
 * blk_add_trace_bio_remap - Add a trace for a bio-remap operation
 * @ignore:	trace callback data parameter (not used)
 * @bio:	the source bio
 * @dev:	source device
 * @from:	source sector
 *
 * Called after a bio is remapped to a different device and/or sector.
 **/
static void blk_add_trace_bio_remap(void *ignore, struct bio *bio, dev_t dev,
				    sector_t from)
{
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
	struct blk_trace *bt;
	struct blk_io_trace_remap r;

	rcu_read_lock();
	bt = rcu_dereference(q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	r.device_from = cpu_to_be32(dev);
	r.device_to   = cpu_to_be32(bio_dev(bio));
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
			bio->bi_opf, BLK_TA_REMAP,
			blk_status_to_errno(bio->bi_status),
			sizeof(r), &r, blk_trace_bio_get_cgid(q, bio));
	rcu_read_unlock();
}

/**
 * blk_add_trace_rq_remap - Add a trace for a request-remap operation
 * @ignore:	trace callback data parameter (not used)
 * @rq:		the source request
 * @dev:	target device
 * @from:	source sector
 *
 * Description:
 *     Device mapper remaps request to other devices.
 *     Add a trace for that action.
 *
 **/
static void blk_add_trace_rq_remap(void *ignore, struct request *rq, dev_t dev,
				   sector_t from)
{
	struct blk_trace *bt;
	struct blk_io_trace_remap r;

	rcu_read_lock();
	bt = rcu_dereference(rq->q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	r.device_from = cpu_to_be32(dev);
	r.device_to   = cpu_to_be32(disk_devt(rq->q->disk));
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
			rq->cmd_flags, BLK_TA_REMAP, 0,
			sizeof(r), &r, blk_trace_request_get_cgid(rq));
	rcu_read_unlock();
}

/**
 * blk_add_driver_data - Add binary message with driver-specific data
 * @rq:		io request
 * @data:	driver-specific data
 * @len:	length of driver-specific data
 *
 * Description:
 *     Some drivers might want to write driver-specific data per request.
 *
 **/
void blk_add_driver_data(struct request *rq, void *data, size_t len)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(rq->q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	__blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0,
			BLK_TA_DRV_DATA, 0, len, data,
			blk_trace_request_get_cgid(rq));
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);

static void blk_register_tracepoints(void)
{
	int ret;

	ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_merge(blk_add_trace_rq_merge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_getrq(blk_add_trace_getrq, NULL);
	WARN_ON(ret);
	ret = register_trace_block_plug(blk_add_trace_plug, NULL);
	WARN_ON(ret);
	ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
	WARN_ON(ret);
	ret = register_trace_block_split(blk_add_trace_split, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
	WARN_ON(ret);
}

static void blk_unregister_tracepoints(void)
{
	unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
	unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
	unregister_trace_block_split(blk_add_trace_split, NULL);
	unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
	unregister_trace_block_plug(blk_add_trace_plug, NULL);
	unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
	unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
	unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
	unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
	unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
	unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
	unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
	unregister_trace_block_rq_merge(blk_add_trace_rq_merge, NULL);
	unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
	unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);

	tracepoint_synchronize_unregister();
}

/*
 * struct blk_io_tracer formatting routines
 */

static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
{
	int i = 0;
	int tc = t->action >> BLK_TC_SHIFT;

	if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
		rwbs[i++] = 'N';
		goto out;
	}

	if (tc & BLK_TC_FLUSH)
		rwbs[i++] = 'F';

	if (tc & BLK_TC_DISCARD)
		rwbs[i++] = 'D';
	else if (tc & BLK_TC_WRITE)
		rwbs[i++] = 'W';
	else if (t->bytes)
		rwbs[i++] = 'R';
	else
		rwbs[i++] = 'N';

	if (tc & BLK_TC_FUA)
		rwbs[i++] = 'F';
	if (tc & BLK_TC_AHEAD)
		rwbs[i++] = 'A';
	if (tc & BLK_TC_SYNC)
		rwbs[i++] = 'S';
	if (tc & BLK_TC_META)
		rwbs[i++] = 'M';
out:
	rwbs[i] = '\0';
}

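/*
 * Accessors for the blk_io_trace payload that follows the generic
 * trace_entry header; when an event carries a cgroup id, a u64 cgid is
 * stored between the header and the pdu proper.
 */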
static inline
const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
{
	return (const struct blk_io_trace *)ent;
}

static inline const void *pdu_start(const struct trace_entry *ent, bool has_cg)
{
	return (void *)(te_blk_io_trace(ent) + 1) + (has_cg ? sizeof(u64) : 0);
}

static inline u64 t_cgid(const struct trace_entry *ent)
{
	return *(u64 *)(te_blk_io_trace(ent) + 1);
}

static inline int pdu_real_len(const struct trace_entry *ent, bool has_cg)
{
	return te_blk_io_trace(ent)->pdu_len - (has_cg ? sizeof(u64) : 0);
}

static inline u32 t_action(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->action;
}

static inline u32 t_bytes(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes;
}

static inline u32 t_sec(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes >> 9;
}

static inline unsigned long long t_sector(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->sector;
}

static inline __u16 t_error(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->error;
}

static __u64 get_pdu_int(const struct trace_entry *ent, bool has_cg)
{
	const __be64 *val = pdu_start(ent, has_cg);
	return be64_to_cpu(*val);
}

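/*
 * Two prefix flavors for the formatted output: the "classic" one mimics
 * the traditional blkparse-style columns (dev, cpu, timestamp, pid), the
 * default one is a minimal prefix, optionally with cgroup information.
 */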
typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act,
	bool has_cg);

static void blk_log_action_classic(struct trace_iterator *iter, const char *act,
	bool has_cg)
{
	char rwbs[RWBS_LEN];
	unsigned long long ts  = iter->ts;
	unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
	unsigned secs	       = (unsigned long)ts;
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);

	trace_seq_printf(&iter->seq,
			 "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
			 MAJOR(t->device), MINOR(t->device), iter->cpu,
			 secs, nsec_rem, iter->ent->pid, act, rwbs);
}

static void blk_log_action(struct trace_iterator *iter, const char *act,
	bool has_cg)
{
	char rwbs[RWBS_LEN];
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);
	if (has_cg) {
		u64 id = t_cgid(iter->ent);

		if (blk_tracer_flags.val & TRACE_BLK_OPT_CGNAME) {
			char blkcg_name_buf[NAME_MAX + 1] = "<...>";

			cgroup_path_from_kernfs_id(id, blkcg_name_buf,
						   sizeof(blkcg_name_buf));
			trace_seq_printf(&iter->seq, "%3d,%-3d %s %2s %3s ",
					 MAJOR(t->device), MINOR(t->device),
					 blkcg_name_buf, act, rwbs);
		} else {
			/*
			 * The cgid portion used to be "INO,GEN".  Userland
			 * builds a FILEID_INO32_GEN fid out of them and
			 * opens the cgroup using open_by_handle_at(2).
			 * While 32bit ino setups are still the same, 64bit
			 * ones now use the 64bit ino as the whole ID and
			 * no longer use generation.
			 *
			 * Regardless of the content, always output
			 * "LOW32,HIGH32" so that FILEID_INO32_GEN fid can
			 * be mapped back to @id on both 64 and 32bit ino
			 * setups.  See __kernfs_fh_to_dentry().
			 */
			trace_seq_printf(&iter->seq,
					 "%3d,%-3d %llx,%-llx %2s %3s ",
					 MAJOR(t->device), MINOR(t->device),
					 id & U32_MAX, id >> 32, act, rwbs);
		}
	} else
		trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
				 MAJOR(t->device), MINOR(t->device), act, rwbs);
}

static void blk_log_dump_pdu(struct trace_seq *s,
			     const struct trace_entry *ent, bool has_cg)
{
	const unsigned char *pdu_buf;
	int pdu_len;
	int i, end;

	pdu_buf = pdu_start(ent, has_cg);
	pdu_len = pdu_real_len(ent, has_cg);

	if (!pdu_len)
		return;

	/* find the last zero that needs to be printed */
	for (end = pdu_len - 1; end >= 0; end--)
		if (pdu_buf[end])
			break;
	end++;

	trace_seq_putc(s, '(');

	for (i = 0; i < pdu_len; i++) {

		trace_seq_printf(s, "%s%02x",
				 i == 0 ? "" : " ", pdu_buf[i]);

		/*
		 * stop when the rest is just zeros and indicate so
		 * with a ".." appended
		 */
		if (i == end && end != pdu_len - 1) {
			trace_seq_puts(s, " ..) ");
			return;
		}
	}

	trace_seq_puts(s, ") ");
}

static void blk_log_generic(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		trace_seq_printf(s, "%u ", t_bytes(ent));
		blk_log_dump_pdu(s, ent, has_cg);
		trace_seq_printf(s, "[%s]\n", cmd);
	} else {
		if (t_sec(ent))
			trace_seq_printf(s, "%llu + %u [%s]\n",
					 t_sector(ent), t_sec(ent), cmd);
		else
			trace_seq_printf(s, "[%s]\n", cmd);
	}
}

static void blk_log_with_error(struct trace_seq *s,
			       const struct trace_entry *ent, bool has_cg)
{
	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		blk_log_dump_pdu(s, ent, has_cg);
		trace_seq_printf(s, "[%d]\n", t_error(ent));
	} else {
		if (t_sec(ent))
			trace_seq_printf(s, "%llu + %u [%d]\n",
					 t_sector(ent),
					 t_sec(ent), t_error(ent));
		else
			trace_seq_printf(s, "%llu [%d]\n",
					 t_sector(ent), t_error(ent));
	}
}

static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
	const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg);

	trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
			 t_sector(ent), t_sec(ent),
			 MAJOR(be32_to_cpu(__r->device_from)),
			 MINOR(be32_to_cpu(__r->device_from)),
			 be64_to_cpu(__r->sector_from));
}

static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "[%s]\n", cmd);
}

static void blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent, has_cg));
}

static void blk_log_split(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
			 get_pdu_int(ent, has_cg), cmd);
}

static void blk_log_msg(struct trace_seq *s, const struct trace_entry *ent,
			bool has_cg)
{
	trace_seq_putmem(s, pdu_start(ent, has_cg),
			 pdu_real_len(ent, has_cg));
	trace_seq_putc(s, '\n');
}

/*
 * struct tracer operations
 */

static void blk_tracer_print_header(struct seq_file *m)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return;
	seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
		    "#  |     |     |           |   |   |\n");
}

static void blk_tracer_start(struct trace_array *tr)
{
	blk_tracer_enabled = true;
}

static int blk_tracer_init(struct trace_array *tr)
{
	blk_tr = tr;
	blk_tracer_start(tr);
	return 0;
}

static void blk_tracer_stop(struct trace_array *tr)
{
	blk_tracer_enabled = false;
}

static void blk_tracer_reset(struct trace_array *tr)
{
	blk_tracer_stop(tr);
}

static const struct {
	const char *act[2];
	void	    (*print)(struct trace_seq *s, const struct trace_entry *ent,
			     bool has_cg);
} what2act[] = {
	[__BLK_TA_QUEUE]	= {{ "Q",  "queue" },	     blk_log_generic },
	[__BLK_TA_BACKMERGE]	= {{ "M",  "backmerge" },    blk_log_generic },
	[__BLK_TA_FRONTMERGE]	= {{ "F",  "frontmerge" },   blk_log_generic },
	[__BLK_TA_GETRQ]	= {{ "G",  "getrq" },	     blk_log_generic },
	[__BLK_TA_SLEEPRQ]	= {{ "S",  "sleeprq" },	     blk_log_generic },
	[__BLK_TA_REQUEUE]	= {{ "R",  "requeue" },	     blk_log_with_error },
	[__BLK_TA_ISSUE]	= {{ "D",  "issue" },	     blk_log_generic },
	[__BLK_TA_COMPLETE]	= {{ "C",  "complete" },     blk_log_with_error },
	[__BLK_TA_PLUG]		= {{ "P",  "plug" },	     blk_log_plug },
	[__BLK_TA_UNPLUG_IO]	= {{ "U",  "unplug_io" },    blk_log_unplug },
	[__BLK_TA_UNPLUG_TIMER]	= {{ "UT", "unplug_timer" }, blk_log_unplug },
	[__BLK_TA_INSERT]	= {{ "I",  "insert" },	     blk_log_generic },
	[__BLK_TA_SPLIT]	= {{ "X",  "split" },	     blk_log_split },
	[__BLK_TA_REMAP]	= {{ "A",  "remap" },	     blk_log_remap },
};

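/*
 * Dispatch one event to its pretty-printer: notify messages are handled
 * directly, everything else goes through the what2act table above.
 */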
static enum print_line_t print_one_line(struct trace_iterator *iter,
					bool classic)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	const struct blk_io_trace *t;
	u16 what;
	bool long_act;
	blk_log_action_t *log_action;
	bool has_cg;

	t	   = te_blk_io_trace(iter->ent);
	what	   = (t->action & ((1 << BLK_TC_SHIFT) - 1)) & ~__BLK_TA_CGROUP;
	long_act   = !!(tr->trace_flags & TRACE_ITER_VERBOSE);
	log_action = classic ? &blk_log_action_classic : &blk_log_action;
	has_cg	   = t->action & __BLK_TA_CGROUP;

	if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
		log_action(iter, long_act ? "message" : "m", has_cg);
		blk_log_msg(s, iter->ent, has_cg);
		return trace_handle_return(s);
	}

	if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
		trace_seq_printf(s, "Unknown action %x\n", what);
	else {
		log_action(iter, what2act[what].act[long_act], has_cg);
		what2act[what].print(s, iter->ent, has_cg);
	}

	return trace_handle_return(s);
}

static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
					       int flags, struct trace_event *event)
{
	return print_one_line(iter, false);
}

static void blk_trace_synthesize_old_trace(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
	const int offset = offsetof(struct blk_io_trace, sector);
	struct blk_io_trace old = {
		.magic	  = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
		.time     = iter->ts,
	};

	trace_seq_putmem(s, &old, offset);
	trace_seq_putmem(s, &t->sector,
			 sizeof(old) - offset + t->pdu_len);
}

static enum print_line_t
blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
			     struct trace_event *event)
{
	blk_trace_synthesize_old_trace(iter);

	return trace_handle_return(&iter->seq);
}

static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
{
	if ((iter->ent->type != TRACE_BLK) ||
	    !(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return TRACE_TYPE_UNHANDLED;

	return print_one_line(iter, true);
}

static int
blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	/* don't output context-info for blk_classic output */
	if (bit == TRACE_BLK_OPT_CLASSIC) {
		if (set)
			tr->trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
		else
			tr->trace_flags |= TRACE_ITER_CONTEXT_INFO;
	}
	return 0;
}

static struct tracer blk_tracer __read_mostly = {
	.name		= "blk",
	.init		= blk_tracer_init,
	.reset		= blk_tracer_reset,
	.start		= blk_tracer_start,
	.stop		= blk_tracer_stop,
	.print_header	= blk_tracer_print_header,
	.print_line	= blk_tracer_print_line,
	.flags		= &blk_tracer_flags,
	.set_flag	= blk_tracer_set_flag,
};

static struct trace_event_functions trace_blk_event_funcs = {
	.trace		= blk_trace_event_print,
	.binary		= blk_trace_event_print_binary,
};

static struct trace_event trace_blk_event = {
	.type		= TRACE_BLK,
	.funcs		= &trace_blk_event_funcs,
};

static int __init init_blk_tracer(void)
{
	if (!register_trace_event(&trace_blk_event)) {
		pr_warn("Warning: could not register block events\n");
		return 1;
	}

	if (register_tracer(&blk_tracer) != 0) {
		pr_warn("Warning: could not register the block tracer\n");
		unregister_trace_event(&trace_blk_event);
		return 1;
	}

	return 0;
}

device_initcall(init_blk_tracer);

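/*
 * The sysfs path below (the "blk" tracer enabled through the per-device
 * trace/ attributes) sets up a blk_trace without a relay channel: events
 * are fed to the ftrace ring buffer instead of the blktrace debugfs files.
 */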
static int blk_trace_remove_queue(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = rcu_replace_pointer(q->blk_trace, NULL,
				 lockdep_is_held(&q->debugfs_mutex));
	if (bt == NULL)
		return -EINVAL;

	blk_trace_stop(bt);

	put_probe_ref();
	synchronize_rcu();
	blk_trace_free(q, bt);
	return 0;
}

/*
 * Setup everything required to start tracing
 */
static int blk_trace_setup_queue(struct request_queue *q,
				 struct block_device *bdev)
{
	struct blk_trace *bt = NULL;
	int ret = -ENOMEM;

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto free_bt;

	bt->dev = bdev->bd_dev;
	bt->act_mask = (u16)-1;

	blk_trace_setup_lba(bt, bdev);

	rcu_assign_pointer(q->blk_trace, bt);
	get_probe_ref();
	return 0;

free_bt:
	blk_trace_free(q, bt);
	return ret;
}

/*
 * sysfs interface to enable and configure tracing
 */

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf);
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count);
#define BLK_TRACE_DEVICE_ATTR(_name) \
	DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
		    sysfs_blk_trace_attr_show, \
		    sysfs_blk_trace_attr_store)

static BLK_TRACE_DEVICE_ATTR(enable);
static BLK_TRACE_DEVICE_ATTR(act_mask);
static BLK_TRACE_DEVICE_ATTR(pid);
static BLK_TRACE_DEVICE_ATTR(start_lba);
static BLK_TRACE_DEVICE_ATTR(end_lba);

static struct attribute *blk_trace_attrs[] = {
	&dev_attr_enable.attr,
	&dev_attr_act_mask.attr,
	&dev_attr_pid.attr,
	&dev_attr_start_lba.attr,
	&dev_attr_end_lba.attr,
	NULL
};

struct attribute_group blk_trace_attr_group = {
	.name  = "trace",
	.attrs = blk_trace_attrs,
};

static const struct {
	int mask;
	const char *str;
} mask_maps[] = {
	{ BLK_TC_READ,		"read"		},
	{ BLK_TC_WRITE,		"write"		},
	{ BLK_TC_FLUSH,		"flush"		},
	{ BLK_TC_SYNC,		"sync"		},
	{ BLK_TC_QUEUE,		"queue"		},
	{ BLK_TC_REQUEUE,	"requeue"	},
	{ BLK_TC_ISSUE,		"issue"		},
	{ BLK_TC_COMPLETE,	"complete"	},
	{ BLK_TC_FS,		"fs"		},
	{ BLK_TC_PC,		"pc"		},
	{ BLK_TC_NOTIFY,	"notify"	},
	{ BLK_TC_AHEAD,		"ahead"		},
	{ BLK_TC_META,		"meta"		},
	{ BLK_TC_DISCARD,	"discard"	},
	{ BLK_TC_DRV_DATA,	"drv_data"	},
	{ BLK_TC_FUA,		"fua"		},
};

static int blk_trace_str2mask(const char *str)
{
	int i;
	int mask = 0;
	char *buf, *s, *token;

	buf = kstrdup(str, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;
	s = strstrip(buf);

	while (1) {
		token = strsep(&s, ",");
		if (token == NULL)
			break;

		if (*token == '\0')
			continue;

		for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
			if (strcasecmp(token, mask_maps[i].str) == 0) {
				mask |= mask_maps[i].mask;
				break;
			}
		}
		if (i == ARRAY_SIZE(mask_maps)) {
			mask = -EINVAL;
			break;
		}
	}
	kfree(buf);

	return mask;
}

static ssize_t blk_trace_mask2str(char *buf, int mask)
{
	int i;
	char *p = buf;

	for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
		if (mask & mask_maps[i].mask) {
			p += sprintf(p, "%s%s",
				    (p == buf) ? "" : ",", mask_maps[i].str);
		}
	}
	*p++ = '\n';

	return p - buf;
}

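/*
 * show/store handlers backing the five sysfs attributes declared above:
 * "enable" reports and toggles tracing, the others expose the filter
 * fields of the live blk_trace.
 */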
static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct block_device *bdev = dev_to_bdev(dev);
	struct request_queue *q = bdev_get_queue(bdev);
	struct blk_trace *bt;
	ssize_t ret = -ENXIO;

	mutex_lock(&q->debugfs_mutex);

	bt = rcu_dereference_protected(q->blk_trace,
				       lockdep_is_held(&q->debugfs_mutex));
	if (attr == &dev_attr_enable) {
		ret = sprintf(buf, "%u\n", !!bt);
		goto out_unlock_bdev;
	}

	if (bt == NULL)
		ret = sprintf(buf, "disabled\n");
	else if (attr == &dev_attr_act_mask)
		ret = blk_trace_mask2str(buf, bt->act_mask);
	else if (attr == &dev_attr_pid)
		ret = sprintf(buf, "%u\n", bt->pid);
	else if (attr == &dev_attr_start_lba)
		ret = sprintf(buf, "%llu\n", bt->start_lba);
	else if (attr == &dev_attr_end_lba)
		ret = sprintf(buf, "%llu\n", bt->end_lba);

out_unlock_bdev:
	mutex_unlock(&q->debugfs_mutex);
	return ret;
}

static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct block_device *bdev = dev_to_bdev(dev);
	struct request_queue *q = bdev_get_queue(bdev);
	struct blk_trace *bt;
	u64 value;
	ssize_t ret = -EINVAL;

	if (count == 0)
		goto out;

	if (attr == &dev_attr_act_mask) {
		if (kstrtoull(buf, 0, &value)) {
			/* Assume it is a list of trace category names */
			ret = blk_trace_str2mask(buf);
			if (ret < 0)
				goto out;
			value = ret;
		}
	} else {
		if (kstrtoull(buf, 0, &value))
			goto out;
	}

	mutex_lock(&q->debugfs_mutex);

	bt = rcu_dereference_protected(q->blk_trace,
				       lockdep_is_held(&q->debugfs_mutex));
	if (attr == &dev_attr_enable) {
		if (!!value == !!bt) {
			ret = 0;
			goto out_unlock_bdev;
		}
		if (value)
			ret = blk_trace_setup_queue(q, bdev);
		else
			ret = blk_trace_remove_queue(q);
		goto out_unlock_bdev;
	}

	ret = 0;
	if (bt == NULL) {
		ret = blk_trace_setup_queue(q, bdev);
		bt = rcu_dereference_protected(q->blk_trace,
					       lockdep_is_held(&q->debugfs_mutex));
	}

	if (ret == 0) {
		if (attr == &dev_attr_act_mask)
			bt->act_mask = value;
		else if (attr == &dev_attr_pid)
			bt->pid = value;
		else if (attr == &dev_attr_start_lba)
			bt->start_lba = value;
		else if (attr == &dev_attr_end_lba)
			bt->end_lba = value;
	}

out_unlock_bdev:
	mutex_unlock(&q->debugfs_mutex);
out:
	return ret ? ret : count;
}
#endif /* CONFIG_BLK_DEV_IO_TRACE */

#ifdef CONFIG_EVENT_TRACING

/**
 * blk_fill_rwbs - Fill the buffer rwbs by mapping op to character string.
 * @rwbs:	buffer to be filled
 * @opf:	request operation type (REQ_OP_XXX) and flags for the tracepoint
 *
 * Description:
 *     Maps each request operation and flag to a single character and fills the
 *     buffer provided by the caller with resulting string.
 *
 **/
void blk_fill_rwbs(char *rwbs, blk_opf_t opf)
{
	int i = 0;

	if (opf & REQ_PREFLUSH)
		rwbs[i++] = 'F';

	switch (opf & REQ_OP_MASK) {
	case REQ_OP_WRITE:
		rwbs[i++] = 'W';
		break;
	case REQ_OP_DISCARD:
		rwbs[i++] = 'D';
		break;
	case REQ_OP_SECURE_ERASE:
		rwbs[i++] = 'D';
		rwbs[i++] = 'E';
		break;
	case REQ_OP_FLUSH:
		rwbs[i++] = 'F';
		break;
	case REQ_OP_READ:
		rwbs[i++] = 'R';
		break;
	case REQ_OP_ZONE_APPEND:
		rwbs[i++] = 'Z';
		rwbs[i++] = 'A';
		break;
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_RESET_ALL:
		rwbs[i++] = 'Z';
		rwbs[i++] = 'R';
		if ((opf & REQ_OP_MASK) == REQ_OP_ZONE_RESET_ALL)
			rwbs[i++] = 'A';
		break;
	case REQ_OP_ZONE_FINISH:
		rwbs[i++] = 'Z';
		rwbs[i++] = 'F';
		break;
	case REQ_OP_ZONE_OPEN:
		rwbs[i++] = 'Z';
		rwbs[i++] = 'O';
		break;
	case REQ_OP_ZONE_CLOSE:
		rwbs[i++] = 'Z';
		rwbs[i++] = 'C';
		break;
	default:
		rwbs[i++] = 'N';
	}

	if (opf & REQ_FUA)
		rwbs[i++] = 'F';
	if (opf & REQ_RAHEAD)
		rwbs[i++] = 'A';
	if (opf & REQ_SYNC)
		rwbs[i++] = 'S';
	if (opf & REQ_META)
		rwbs[i++] = 'M';
	if (opf & REQ_ATOMIC)
		rwbs[i++] = 'U';

	WARN_ON_ONCE(i >= RWBS_LEN);

	rwbs[i] = '\0';
}
EXPORT_SYMBOL_GPL(blk_fill_rwbs);

#endif /* CONFIG_EVENT_TRACING */