/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/time.h>
#include <trace/block.h>
#include <linux/uaccess.h>
#include "trace_output.h"

static unsigned int blktrace_seq __read_mostly = 1;

static struct trace_array *blk_tr;
static bool blk_tracer_enabled __read_mostly;
/* Select an alternative, minimalistic output instead of the original one */
#define TRACE_BLK_OPT_CLASSIC	0x1

static struct tracer_opt blk_tracer_opts[] = {
	/* The minimalistic output is disabled by default */
	{ TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
	{ }
};

static struct tracer_flags blk_tracer_flags = {
	.val  = 0,
	.opts = blk_tracer_opts,
};

/* Global reference count of probes */
static atomic_t blk_probes_ref = ATOMIC_INIT(0);

static void blk_register_tracepoints(void);
static void blk_unregister_tracepoints(void);
/*
 * Send out a notify message.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
		       const void *data, size_t len)
{
	struct blk_io_trace *t;
	struct ring_buffer_event *event = NULL;
	int pc = 0;
	int cpu = smp_processor_id();
	bool blk_tracer = blk_tracer_enabled;

	if (blk_tracer) {
		pc = preempt_count();
		event = trace_buffer_lock_reserve(blk_tr, TRACE_BLK,
						  sizeof(*t) + len, 0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	if (!bt->rchan)
		return;

	t = relay_reserve(bt->rchan, sizeof(*t) + len);
	if (t) {
		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->time = ktime_to_ns(ktime_get());
record_it:
		t->device = bt->dev;
		t->action = action;
		t->pid = pid;
		t->cpu = cpu;
		t->pdu_len = len;
		memcpy((void *) t + sizeof(*t), data, len);

		if (blk_tracer)
			trace_buffer_unlock_commit(blk_tr, event, 0, pc);
	}
}
/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started
 */
static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
{
	tsk->btrace_seq = blktrace_seq;
	trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
}

static void trace_note_time(struct blk_trace *bt)
{
	struct timespec now;
	unsigned long flags;
	u32 words[2];

	getnstimeofday(&now);
	words[0] = now.tv_sec;
	words[1] = now.tv_nsec;

	local_irq_save(flags);
	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words));
	local_irq_restore(flags);
}
void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
{
	int n;
	va_list args;
	unsigned long flags;
	char *buf;

	if (unlikely(bt->trace_state != Blktrace_running &&
		     !blk_tracer_enabled))
		return;

	local_irq_save(flags);
	buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
	va_start(args, fmt);
	n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
	va_end(args);

	trace_note(bt, 0, BLK_TN_MESSAGE, buf, n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__trace_note_message);
static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
			 pid_t pid)
{
	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
		return 1;
	if (sector && (sector < bt->start_lba || sector > bt->end_lba))
		return 1;
	if (bt->pid && pid != bt->pid)
		return 1;

	return 0;
}
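
/*
 * Example (illustrative only, not part of the original source): with
 *
 *	bt->act_mask  = BLK_TC_READ | BLK_TC_WRITE;
 *	bt->start_lba = 0;
 *	bt->end_lba   = 1 << 20;
 *	bt->pid       = 0;	(no pid filter)
 *
 * a queue action on a read at sector 4096 passes all three checks and
 * act_log_check() returns 0 ("log it"); an event whose sector falls outside
 * the LBA window, or from another task when a pid filter is set, returns 1
 * ("drop it").
 */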
/*
 * Data direction bit lookup
 */
static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
				 BLK_TC_ACT(BLK_TC_WRITE) };

/* The ilog2() calls fall out because they're constant */
#define MASK_TC_BIT(rw, __name) ((rw & (1 << BIO_RW_ ## __name)) << \
	  (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - BIO_RW_ ## __name))
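
/*
 * Worked expansion (illustrative only): MASK_TC_BIT(rw, BARRIER) isolates
 * the BIO_RW_BARRIER bit of "rw" and shifts it onto the BLK_TC_BARRIER
 * action bit above BLK_TC_SHIFT:
 *
 *	what |= (rw & (1 << BIO_RW_BARRIER)) <<
 *		(ilog2(BLK_TC_BARRIER) + BLK_TC_SHIFT - BIO_RW_BARRIER);
 *
 * Since ilog2() of a constant is itself constant, the whole expression
 * folds to a single mask-and-shift at compile time.
 */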
/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
		     int rw, u32 what, int error, int pdu_len, void *pdu_data)
{
	struct task_struct *tsk = current;
	struct ring_buffer_event *event = NULL;
	struct blk_io_trace *t;
	unsigned long flags = 0;
	unsigned long *sequence;
	pid_t pid;
	int cpu, pc = 0;
	bool blk_tracer = blk_tracer_enabled;

	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
		return;

	what |= ddir_act[rw & WRITE];
	what |= MASK_TC_BIT(rw, BARRIER);
	what |= MASK_TC_BIT(rw, SYNCIO);
	what |= MASK_TC_BIT(rw, AHEAD);
	what |= MASK_TC_BIT(rw, META);
	what |= MASK_TC_BIT(rw, DISCARD);

	pid = tsk->pid;
	if (act_log_check(bt, what, sector, pid))
		return;
	cpu = raw_smp_processor_id();

	if (blk_tracer) {
		tracing_record_cmdline(current);

		pc = preempt_count();
		event = trace_buffer_lock_reserve(blk_tr, TRACE_BLK,
						  sizeof(*t) + pdu_len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	/*
	 * A word about the locking here - we disable interrupts to reserve
	 * some space in the relay per-cpu buffer, to prevent an irq
	 * from coming in and stepping on our toes.
	 */
	local_irq_save(flags);

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(bt, tsk);

	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
	if (t) {
		sequence = per_cpu_ptr(bt->sequence, cpu);

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->sequence = ++(*sequence);
		t->time = ktime_to_ns(ktime_get());
record_it:
		/*
		 * These two are not needed in ftrace as they are in the
		 * generic trace_entry, filled by tracing_generic_entry_update,
		 * but for the trace_event->bin() synthesizer benefit we do it
		 * here too.
		 */
		t->cpu = cpu;
		t->pid = pid;

		t->sector = sector;
		t->bytes = bytes;
		t->action = what;
		t->device = bt->dev;
		t->error = error;
		t->pdu_len = pdu_len;

		if (pdu_len)
			memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);

		if (blk_tracer) {
			trace_buffer_unlock_commit(blk_tr, event, 0, pc);
			return;
		}
	}

	local_irq_restore(flags);
}
static struct dentry *blk_tree_root;
static DEFINE_MUTEX(blk_tree_mutex);

static void blk_trace_free(struct blk_trace *bt)
{
	debugfs_remove(bt->msg_file);
	debugfs_remove(bt->dropped_file);
	debugfs_remove(bt->dir);
	relay_close(bt->rchan);
	free_percpu(bt->sequence);
	free_percpu(bt->msg_data);
	kfree(bt);
}

static void blk_trace_cleanup(struct blk_trace *bt)
{
	blk_trace_free(bt);
	if (atomic_dec_and_test(&blk_probes_ref))
		blk_unregister_tracepoints();
}
int blk_trace_remove(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (!bt)
		return -EINVAL;

	if (bt->trace_state != Blktrace_running)
		blk_trace_cleanup(bt);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_remove);

static int blk_dropped_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;

	return 0;
}

static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct blk_trace *bt = filp->private_data;
	char buf[16];

	snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));

	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}

static const struct file_operations blk_dropped_fops = {
	.owner =	THIS_MODULE,
	.open =		blk_dropped_open,
	.read =		blk_dropped_read,
};
static int blk_msg_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;

	return 0;
}

static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
				size_t count, loff_t *ppos)
{
	char *msg;
	struct blk_trace *bt;

	if (count >= BLK_TN_MAX_MSG)
		return -EINVAL;

	msg = kmalloc(count + 1, GFP_KERNEL);
	if (msg == NULL)
		return -ENOMEM;

	if (copy_from_user(msg, buffer, count)) {
		kfree(msg);
		return -EFAULT;
	}

	msg[count] = '\0';
	bt = filp->private_data;
	__trace_note_message(bt, "%s", msg);
	kfree(msg);

	return count;
}

static const struct file_operations blk_msg_fops = {
	.owner =	THIS_MODULE,
	.open =		blk_msg_open,
	.write =	blk_msg_write,
};
/*
 * Keep track of how many times we encountered a full subbuffer, to aid
 * the user space app in telling how many lost events there were.
 */
static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
				     void *prev_subbuf, size_t prev_padding)
{
	struct blk_trace *bt;

	if (!relay_buf_full(buf))
		return 1;

	bt = buf->chan->private_data;
	atomic_inc(&bt->dropped);
	return 0;
}

static int blk_remove_buf_file_callback(struct dentry *dentry)
{
	struct dentry *parent = dentry->d_parent;
	debugfs_remove(dentry);

	/*
	 * This will fail for all but the last file, but that is OK: what we
	 * care about is the top-level buts->name directory going away when
	 * the last trace file is gone.  Then we don't have to rmdir() it
	 * manually on trace stop, which nicely solves the issue with
	 * force-killing running traces.
	 */

	debugfs_remove(parent);
	return 0;
}

static struct dentry *blk_create_buf_file_callback(const char *filename,
						   struct dentry *parent,
						   int mode,
						   struct rchan_buf *buf,
						   int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
					&relay_file_operations);
}

static struct rchan_callbacks blk_relay_callbacks = {
	.subbuf_start		= blk_subbuf_start_callback,
	.create_buf_file	= blk_create_buf_file_callback,
	.remove_buf_file	= blk_remove_buf_file_callback,
};
static void blk_trace_setup_lba(struct blk_trace *bt,
				struct block_device *bdev)
{
	struct hd_struct *part = NULL;

	if (bdev)
		part = bdev->bd_part;

	if (part) {
		bt->start_lba = part->start_sect;
		bt->end_lba = part->start_sect + part->nr_sects;
	} else {
		bt->start_lba = 0;
		bt->end_lba = -1;
	}
}
/*
 * Setup everything required to start tracing
 */
int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		       struct block_device *bdev,
		       struct blk_user_trace_setup *buts)
{
	struct blk_trace *old_bt, *bt = NULL;
	struct dentry *dir = NULL;
	int ret, i;

	if (!buts->buf_size || !buts->buf_nr)
		return -EINVAL;

	strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
	buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';

	/*
	 * some device names have larger paths - convert the slashes
	 * to underscores for this to work as expected
	 */
	for (i = 0; i < strlen(buts->name); i++)
		if (buts->name[i] == '/')
			buts->name[i] = '_';

	ret = -ENOMEM;
	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	bt->sequence = alloc_percpu(unsigned long);
	if (!bt->sequence)
		goto err;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto err;

	ret = -ENOENT;

	mutex_lock(&blk_tree_mutex);
	if (!blk_tree_root) {
		blk_tree_root = debugfs_create_dir("block", NULL);
		if (!blk_tree_root) {
			mutex_unlock(&blk_tree_mutex);
			goto err;
		}
	}
	mutex_unlock(&blk_tree_mutex);

	dir = debugfs_create_dir(buts->name, blk_tree_root);
	if (!dir)
		goto err;

	bt->dir = dir;
	bt->dev = dev;
	atomic_set(&bt->dropped, 0);

	ret = -EIO;
	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
					       &blk_dropped_fops);
	if (!bt->dropped_file)
		goto err;

	bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
	if (!bt->msg_file)
		goto err;

	bt->rchan = relay_open("trace", dir, buts->buf_size,
				buts->buf_nr, &blk_relay_callbacks, bt);
	if (!bt->rchan)
		goto err;

	bt->act_mask = buts->act_mask;
	if (!bt->act_mask)
		bt->act_mask = (u16) -1;

	blk_trace_setup_lba(bt, bdev);

	/* overwrite with user settings */
	if (buts->start_lba)
		bt->start_lba = buts->start_lba;
	if (buts->end_lba)
		bt->end_lba = buts->end_lba;

	bt->pid = buts->pid;
	bt->trace_state = Blktrace_setup;

	ret = -EBUSY;
	old_bt = xchg(&q->blk_trace, bt);
	if (old_bt) {
		(void) xchg(&q->blk_trace, old_bt);
		goto err;
	}

	if (atomic_inc_return(&blk_probes_ref) == 1)
		blk_register_tracepoints();

	return 0;
err:
	blk_trace_free(bt);
	return ret;
}
int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		    struct block_device *bdev,
		    char __user *arg)
{
	struct blk_user_trace_setup buts;
	int ret;

	ret = copy_from_user(&buts, arg, sizeof(buts));
	if (ret)
		return -EFAULT;

	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts, sizeof(buts)))
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_setup);
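
/*
 * Illustrative user-space usage (a sketch, not part of this file): the
 * blktrace(8) utility drives the code above through ioctls on the block
 * device node, roughly:
 *
 *	struct blk_user_trace_setup buts = {
 *		.buf_size = 512 * 1024,		(bytes per relay sub-buffer)
 *		.buf_nr   = 4,			(sub-buffers per cpu)
 *		.act_mask = 0,			(0 means "trace everything")
 *	};
 *	int fd = open("/dev/sda", O_RDONLY);
 *	ioctl(fd, BLKTRACESETUP, &buts);
 *	ioctl(fd, BLKTRACESTART);
 *	(read the per-cpu relay files debugfs/block/<name>/trace<cpu>)
 *	ioctl(fd, BLKTRACESTOP);
 *	ioctl(fd, BLKTRACETEARDOWN);
 *
 * The buffer sizes here are made-up example values.
 */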
int blk_trace_startstop(struct request_queue *q, int start)
{
	int ret;
	struct blk_trace *bt = q->blk_trace;

	if (bt == NULL)
		return -EINVAL;

	/*
	 * For starting a trace, we can transition from a setup or stopped
	 * trace. For stopping a trace, the state must be running.
	 */
	ret = -EINVAL;
	if (start) {
		if (bt->trace_state == Blktrace_setup ||
		    bt->trace_state == Blktrace_stopped) {
			blktrace_seq++;
			smp_mb();
			bt->trace_state = Blktrace_running;

			trace_note_time(bt);
			ret = 0;
		}
	} else {
		if (bt->trace_state == Blktrace_running) {
			bt->trace_state = Blktrace_stopped;
			relay_flush(bt->rchan);
			ret = 0;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_startstop);
/**
 * blk_trace_ioctl: - handle the ioctls associated with tracing
 * @bdev:	the block device
 * @cmd:	the ioctl cmd
 * @arg:	the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
	struct request_queue *q;
	int ret, start = 0;
	char b[BDEVNAME_SIZE];

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	mutex_lock(&bdev->bd_mutex);

	switch (cmd) {
	case BLKTRACESETUP:
		bdevname(bdev, b);
		ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
	case BLKTRACESTART:
		start = 1;
		/* fall through */
	case BLKTRACESTOP:
		ret = blk_trace_startstop(q, start);
		break;
	case BLKTRACETEARDOWN:
		ret = blk_trace_remove(q);
		break;
	default:
		ret = -ENOTTY;
		break;
	}

	mutex_unlock(&bdev->bd_mutex);
	return ret;
}

/**
 * blk_trace_shutdown: - stop and cleanup trace structures
 * @q:   the request queue associated with the device
 *
 **/
void blk_trace_shutdown(struct request_queue *q)
{
	if (q->blk_trace) {
		blk_trace_startstop(q, 0);
		blk_trace_remove(q);
	}
}
/*
 * blktrace probes
 */

/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @q:		queue the io is for
 * @rq:		the source request
 * @what:	the action
 *
 * Description:
 *     Records an action against a request. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
			     u32 what)
{
	struct blk_trace *bt = q->blk_trace;
	int rw = rq->cmd_flags & 0x03;

	if (likely(!bt))
		return;

	if (blk_discard_rq(rq))
		rw |= (1 << BIO_RW_DISCARD);

	if (blk_pc_request(rq)) {
		what |= BLK_TC_ACT(BLK_TC_PC);
		__blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors,
				rq->cmd_len, rq->cmd);
	} else {
		what |= BLK_TC_ACT(BLK_TC_FS);
		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
				rw, what, rq->errors, 0, NULL);
	}
}

static void blk_add_trace_rq_abort(struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_ABORT);
}

static void blk_add_trace_rq_insert(struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_INSERT);
}

static void blk_add_trace_rq_issue(struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
}

static void blk_add_trace_rq_requeue(struct request_queue *q,
				     struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
}

static void blk_add_trace_rq_complete(struct request_queue *q,
				      struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
}
/**
 * blk_add_trace_bio - Add a trace for a bio oriented action
 * @q:		queue the io is for
 * @bio:	the source bio
 * @what:	the action
 *
 * Description:
 *     Records an action against a bio. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
			      u32 what)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
			!bio_flagged(bio, BIO_UPTODATE), 0, NULL);
}

static void blk_add_trace_bio_bounce(struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_BOUNCE);
}

static void blk_add_trace_bio_complete(struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE);
}

static void blk_add_trace_bio_backmerge(struct request_queue *q,
					struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
}

static void blk_add_trace_bio_frontmerge(struct request_queue *q,
					 struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
}

static void blk_add_trace_bio_queue(struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
}
static void blk_add_trace_getrq(struct request_queue *q,
				struct bio *bio, int rw)
{
	if (bio)
		blk_add_trace_bio(q, bio, BLK_TA_GETRQ);
	else {
		struct blk_trace *bt = q->blk_trace;

		if (bt)
			__blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL);
	}
}

static void blk_add_trace_sleeprq(struct request_queue *q,
				  struct bio *bio, int rw)
{
	if (bio)
		blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ);
	else {
		struct blk_trace *bt = q->blk_trace;

		if (bt)
			__blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ,
					0, 0, NULL);
	}
}

static void blk_add_trace_plug(struct request_queue *q)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt)
		__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
}
static void blk_add_trace_unplug_io(struct request_queue *q)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
				sizeof(rpdu), &rpdu);
	}
}

static void blk_add_trace_unplug_timer(struct request_queue *q)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_TIMER, 0,
				sizeof(rpdu), &rpdu);
	}
}

static void blk_add_trace_split(struct request_queue *q, struct bio *bio,
				unsigned int pdu)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
				BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE),
				sizeof(rpdu), &rpdu);
	}
}
/**
 * blk_add_trace_remap - Add a trace for a remap operation
 * @q:		queue the io is for
 * @bio:	the source bio
 * @dev:	target device
 * @from:	source sector
 *
 * Description:
 *     Device mapper or raid targets sometimes need to split a bio because
 *     it spans a stripe (or similar). Add a trace for that action.
 *
 **/
static void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
				dev_t dev, sector_t from)
{
	struct blk_trace *bt = q->blk_trace;
	struct blk_io_trace_remap r;

	if (likely(!bt))
		return;

	r.device_from = cpu_to_be32(dev);
	r.device_to   = cpu_to_be32(bio->bi_bdev->bd_dev);
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
			BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE),
			sizeof(r), &r);
}
/**
 * blk_add_driver_data - Add binary message with driver-specific data
 * @q:		queue the io is for
 * @rq:		io request
 * @data:	driver-specific data
 * @len:	length of driver-specific data
 *
 * Description:
 *     Some drivers might want to write driver-specific data per request.
 *
 **/
void blk_add_driver_data(struct request_queue *q,
			 struct request *rq,
			 void *data, size_t len)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	if (blk_pc_request(rq))
		__blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA,
				rq->errors, len, data);
	else
		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
				0, BLK_TA_DRV_DATA, rq->errors, len, data);
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);
static void blk_register_tracepoints(void)
{
	int ret;

	ret = register_trace_block_rq_abort(blk_add_trace_rq_abort);
	WARN_ON(ret);
	ret = register_trace_block_rq_insert(blk_add_trace_rq_insert);
	WARN_ON(ret);
	ret = register_trace_block_rq_issue(blk_add_trace_rq_issue);
	WARN_ON(ret);
	ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue);
	WARN_ON(ret);
	ret = register_trace_block_rq_complete(blk_add_trace_rq_complete);
	WARN_ON(ret);
	ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce);
	WARN_ON(ret);
	ret = register_trace_block_bio_complete(blk_add_trace_bio_complete);
	WARN_ON(ret);
	ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
	WARN_ON(ret);
	ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
	WARN_ON(ret);
	ret = register_trace_block_bio_queue(blk_add_trace_bio_queue);
	WARN_ON(ret);
	ret = register_trace_block_getrq(blk_add_trace_getrq);
	WARN_ON(ret);
	ret = register_trace_block_sleeprq(blk_add_trace_sleeprq);
	WARN_ON(ret);
	ret = register_trace_block_plug(blk_add_trace_plug);
	WARN_ON(ret);
	ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer);
	WARN_ON(ret);
	ret = register_trace_block_unplug_io(blk_add_trace_unplug_io);
	WARN_ON(ret);
	ret = register_trace_block_split(blk_add_trace_split);
	WARN_ON(ret);
	ret = register_trace_block_remap(blk_add_trace_remap);
	WARN_ON(ret);
}
static void blk_unregister_tracepoints(void)
{
	unregister_trace_block_remap(blk_add_trace_remap);
	unregister_trace_block_split(blk_add_trace_split);
	unregister_trace_block_unplug_io(blk_add_trace_unplug_io);
	unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer);
	unregister_trace_block_plug(blk_add_trace_plug);
	unregister_trace_block_sleeprq(blk_add_trace_sleeprq);
	unregister_trace_block_getrq(blk_add_trace_getrq);
	unregister_trace_block_bio_queue(blk_add_trace_bio_queue);
	unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
	unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
	unregister_trace_block_bio_complete(blk_add_trace_bio_complete);
	unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce);
	unregister_trace_block_rq_complete(blk_add_trace_rq_complete);
	unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue);
	unregister_trace_block_rq_issue(blk_add_trace_rq_issue);
	unregister_trace_block_rq_insert(blk_add_trace_rq_insert);
	unregister_trace_block_rq_abort(blk_add_trace_rq_abort);

	tracepoint_synchronize_unregister();
}
/*
 * struct blk_io_tracer formatting routines
 */

static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
{
	int i = 0;
	int tc = t->action >> BLK_TC_SHIFT;

	if (t->action == BLK_TN_MESSAGE) {
		rwbs[i++] = 'N';
		goto out;
	}

	if (tc & BLK_TC_DISCARD)
		rwbs[i++] = 'D';
	else if (tc & BLK_TC_WRITE)
		rwbs[i++] = 'W';
	else if (t->bytes)
		rwbs[i++] = 'R';
	else
		rwbs[i++] = 'N';

	if (tc & BLK_TC_AHEAD)
		rwbs[i++] = 'A';
	if (tc & BLK_TC_BARRIER)
		rwbs[i++] = 'B';
	if (tc & BLK_TC_SYNC)
		rwbs[i++] = 'S';
	if (tc & BLK_TC_META)
		rwbs[i++] = 'M';
out:
	rwbs[i] = '\0';
}
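
/*
 * Examples (illustrative only): a synchronous write with the barrier bit
 * set yields "WBS", a discard yields "D", and a BLK_TN_MESSAGE note is
 * always rendered as "N".  The resulting string becomes the FLG column of
 * the classic output format printed below.
 */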
static inline
const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
{
	return (const struct blk_io_trace *)ent;
}

static inline const void *pdu_start(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent) + 1;
}

static inline u32 t_action(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->action;
}

static inline u32 t_bytes(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes;
}

static inline u32 t_sec(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes >> 9;
}

static inline unsigned long long t_sector(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->sector;
}

static inline __u16 t_error(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->error;
}

static __u64 get_pdu_int(const struct trace_entry *ent)
{
	const __u64 *val = pdu_start(ent);
	return be64_to_cpu(*val);
}

static void get_pdu_remap(const struct trace_entry *ent,
			  struct blk_io_trace_remap *r)
{
	const struct blk_io_trace_remap *__r = pdu_start(ent);
	__u64 sector_from = __r->sector_from;

	r->device_from = be32_to_cpu(__r->device_from);
	r->device_to   = be32_to_cpu(__r->device_to);
	r->sector_from = be64_to_cpu(sector_from);
}
typedef int (blk_log_action_t) (struct trace_iterator *iter, const char *act);

static int blk_log_action_classic(struct trace_iterator *iter, const char *act)
{
	char rwbs[6];
	unsigned long long ts  = iter->ts;
	unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
	unsigned secs	       = (unsigned long)ts;
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);

	return trace_seq_printf(&iter->seq,
				"%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
				MAJOR(t->device), MINOR(t->device), iter->cpu,
				secs, nsec_rem, iter->ent->pid, act, rwbs);
}
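
/*
 * Example classic output line (illustrative values):
 *
 *	  8,0    1     0.000123456  4813   Q  WS 1024 + 8 [dd]
 *
 * i.e. DEV, CPU, TIMESTAMP, PID, ACT ("Q" for queue) and FLG ("WS" for a
 * synchronous write), followed by the per-action payload emitted by the
 * blk_log_*() helpers below.
 */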
static int blk_log_action(struct trace_iterator *iter, const char *act)
{
	char rwbs[6];
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);
	return trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
				MAJOR(t->device), MINOR(t->device), act, rwbs);
}

static int blk_log_dump_pdu(struct trace_seq *s, const struct trace_entry *ent)
{
	const unsigned char *pdu_buf;
	int pdu_len;
	int i, end, ret;

	pdu_buf = pdu_start(ent);
	pdu_len = te_blk_io_trace(ent)->pdu_len;

	if (!pdu_len)
		return 1;

	/* find the last zero that needs to be printed */
	for (end = pdu_len - 1; end >= 0; end--)
		if (pdu_buf[end])
			break;
	end++;

	if (!trace_seq_putc(s, '('))
		return 0;

	for (i = 0; i < pdu_len; i++) {

		ret = trace_seq_printf(s, "%s%02x",
				       i == 0 ? "" : " ", pdu_buf[i]);
		if (!ret)
			return ret;

		/*
		 * stop when the rest is just zeroes and indicate so
		 * with a ".." appended
		 */
		if (i == end && end != pdu_len - 1)
			return trace_seq_puts(s, " ..) ");
	}

	return trace_seq_puts(s, ") ");
}
static int blk_log_generic(struct trace_seq *s, const struct trace_entry *ent)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		int ret;

		ret = trace_seq_printf(s, "%u ", t_bytes(ent));
		if (!ret)
			return 0;
		ret = blk_log_dump_pdu(s, ent);
		if (!ret)
			return 0;
		return trace_seq_printf(s, "[%s]\n", cmd);
	} else {
		if (t_sec(ent))
			return trace_seq_printf(s, "%llu + %u [%s]\n",
						t_sector(ent), t_sec(ent), cmd);
		return trace_seq_printf(s, "[%s]\n", cmd);
	}
}

static int blk_log_with_error(struct trace_seq *s,
			      const struct trace_entry *ent)
{
	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		int ret;

		ret = blk_log_dump_pdu(s, ent);
		if (ret)
			return trace_seq_printf(s, "[%d]\n", t_error(ent));
		return 0;
	} else {
		if (t_sec(ent))
			return trace_seq_printf(s, "%llu + %u [%d]\n",
						t_sector(ent),
						t_sec(ent), t_error(ent));
		return trace_seq_printf(s, "%llu [%d]\n",
					t_sector(ent), t_error(ent));
	}
}
static int blk_log_remap(struct trace_seq *s, const struct trace_entry *ent)
{
	struct blk_io_trace_remap r = { .device_from = 0, };

	get_pdu_remap(ent, &r);
	return trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
				t_sector(ent), t_sec(ent),
				MAJOR(r.device_from), MINOR(r.device_from),
				(unsigned long long)r.sector_from);
}

static int blk_log_plug(struct trace_seq *s, const struct trace_entry *ent)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	return trace_seq_printf(s, "[%s]\n", cmd);
}

static int blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	return trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent));
}

static int blk_log_split(struct trace_seq *s, const struct trace_entry *ent)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	return trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
				get_pdu_int(ent), cmd);
}

static int blk_log_msg(struct trace_seq *s, const struct trace_entry *ent)
{
	int ret;
	const struct blk_io_trace *t = te_blk_io_trace(ent);

	ret = trace_seq_putmem(s, t + 1, t->pdu_len);
	if (ret)
		return trace_seq_putc(s, '\n');
	return ret;
}
/*
 * struct tracer operations
 */

static void blk_tracer_print_header(struct seq_file *m)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return;
	seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
		    "#  |     |     |           |   |   |\n");
}

static void blk_tracer_start(struct trace_array *tr)
{
	blk_tracer_enabled = true;
}

static int blk_tracer_init(struct trace_array *tr)
{
	blk_tr = tr;
	blk_tracer_start(tr);
	return 0;
}

static void blk_tracer_stop(struct trace_array *tr)
{
	blk_tracer_enabled = false;
}

static void blk_tracer_reset(struct trace_array *tr)
{
	blk_tracer_stop(tr);
}
static const struct {
	const char *act[2];
	int	   (*print)(struct trace_seq *s, const struct trace_entry *ent);
} what2act[] = {
	[__BLK_TA_QUEUE]	= {{  "Q", "queue" },	   blk_log_generic },
	[__BLK_TA_BACKMERGE]	= {{  "M", "backmerge" },  blk_log_generic },
	[__BLK_TA_FRONTMERGE]	= {{  "F", "frontmerge" }, blk_log_generic },
	[__BLK_TA_GETRQ]	= {{  "G", "getrq" },	   blk_log_generic },
	[__BLK_TA_SLEEPRQ]	= {{  "S", "sleeprq" },	   blk_log_generic },
	[__BLK_TA_REQUEUE]	= {{  "R", "requeue" },	   blk_log_with_error },
	[__BLK_TA_ISSUE]	= {{  "D", "issue" },	   blk_log_generic },
	[__BLK_TA_COMPLETE]	= {{  "C", "complete" },   blk_log_with_error },
	[__BLK_TA_PLUG]		= {{  "P", "plug" },	   blk_log_plug },
	[__BLK_TA_UNPLUG_IO]	= {{  "U", "unplug_io" },  blk_log_unplug },
	[__BLK_TA_UNPLUG_TIMER]	= {{ "UT", "unplug_timer" }, blk_log_unplug },
	[__BLK_TA_INSERT]	= {{  "I", "insert" },	   blk_log_generic },
	[__BLK_TA_SPLIT]	= {{  "X", "split" },	   blk_log_split },
	[__BLK_TA_BOUNCE]	= {{  "B", "bounce" },	   blk_log_generic },
	[__BLK_TA_REMAP]	= {{  "A", "remap" },	   blk_log_remap },
};
static enum print_line_t print_one_line(struct trace_iterator *iter,
					bool classic)
{
	struct trace_seq *s = &iter->seq;
	const struct blk_io_trace *t;
	u16 what;
	int ret;
	bool long_act;
	blk_log_action_t *log_action;

	t	   = te_blk_io_trace(iter->ent);
	what	   = t->action & ((1 << BLK_TC_SHIFT) - 1);
	long_act   = !!(trace_flags & TRACE_ITER_VERBOSE);
	log_action = classic ? &blk_log_action_classic : &blk_log_action;

	if (t->action == BLK_TN_MESSAGE) {
		ret = log_action(iter, long_act ? "message" : "m");
		if (ret)
			ret = blk_log_msg(s, iter->ent);
		goto out;
	}

	if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
		ret = trace_seq_printf(s, "Unknown action %x\n", what);
	else {
		ret = log_action(iter, what2act[what].act[long_act]);
		if (ret)
			ret = what2act[what].print(s, iter->ent);
	}
out:
	return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}
static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
					       int flags)
{
	return print_one_line(iter, false);
}

static int blk_trace_synthesize_old_trace(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
	const int offset = offsetof(struct blk_io_trace, sector);
	struct blk_io_trace old = {
		.magic	  = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
		.time     = iter->ts,
	};

	if (!trace_seq_putmem(s, &old, offset))
		return 0;
	return trace_seq_putmem(s, &t->sector,
				sizeof(old) - offset + t->pdu_len);
}

static enum print_line_t
blk_trace_event_print_binary(struct trace_iterator *iter, int flags)
{
	return blk_trace_synthesize_old_trace(iter) ?
			TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return TRACE_TYPE_UNHANDLED;

	return print_one_line(iter, true);
}
static int blk_tracer_set_flag(u32 old_flags, u32 bit, int set)
{
	/* don't output context-info for blk_classic output */
	if (bit == TRACE_BLK_OPT_CLASSIC) {
		if (set)
			trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
		else
			trace_flags |= TRACE_ITER_CONTEXT_INFO;
	}
	return 0;
}

static struct tracer blk_tracer __read_mostly = {
	.name		= "blk",
	.init		= blk_tracer_init,
	.reset		= blk_tracer_reset,
	.start		= blk_tracer_start,
	.stop		= blk_tracer_stop,
	.print_header	= blk_tracer_print_header,
	.print_line	= blk_tracer_print_line,
	.flags		= &blk_tracer_flags,
	.set_flag	= blk_tracer_set_flag,
};
static struct trace_event trace_blk_event = {
	.type		= TRACE_BLK,
	.trace		= blk_trace_event_print,
	.binary		= blk_trace_event_print_binary,
};

static int __init init_blk_tracer(void)
{
	if (!register_ftrace_event(&trace_blk_event)) {
		pr_warning("Warning: could not register block events\n");
		return 1;
	}

	if (register_tracer(&blk_tracer) != 0) {
		pr_warning("Warning: could not register the block tracer\n");
		unregister_ftrace_event(&trace_blk_event);
		return 1;
	}

	return 0;
}

device_initcall(init_blk_tracer);
static int blk_trace_remove_queue(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (bt == NULL)
		return -EINVAL;

	if (atomic_dec_and_test(&blk_probes_ref))
		blk_unregister_tracepoints();

	blk_trace_free(bt);
	return 0;
}

/*
 * Setup everything required to start tracing
 */
static int blk_trace_setup_queue(struct request_queue *q,
				 struct block_device *bdev)
{
	struct blk_trace *old_bt, *bt = NULL;
	int ret = -ENOMEM;

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto free_bt;

	bt->dev = bdev->bd_dev;
	bt->act_mask = (u16)-1;

	blk_trace_setup_lba(bt, bdev);

	old_bt = xchg(&q->blk_trace, bt);
	if (old_bt != NULL) {
		(void)xchg(&q->blk_trace, old_bt);
		ret = -EBUSY;
		goto free_bt;
	}

	if (atomic_inc_return(&blk_probes_ref) == 1)
		blk_register_tracepoints();
	return 0;

free_bt:
	blk_trace_free(bt);
	return ret;
}
/*
 * sysfs interface to enable and configure tracing
 */

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf);
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count);
#define BLK_TRACE_DEVICE_ATTR(_name) \
	DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
		    sysfs_blk_trace_attr_show, \
		    sysfs_blk_trace_attr_store)
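
/*
 * Illustration (not part of the original source): BLK_TRACE_DEVICE_ATTR(pid)
 * expands to DEVICE_ATTR(pid, S_IRUGO | S_IWUSR, ...) and so defines
 * "struct device_attribute dev_attr_pid", which backs the "pid" file in
 * the sysfs "trace" attribute group created below.
 */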
static BLK_TRACE_DEVICE_ATTR(enable);
static BLK_TRACE_DEVICE_ATTR(act_mask);
static BLK_TRACE_DEVICE_ATTR(pid);
static BLK_TRACE_DEVICE_ATTR(start_lba);
static BLK_TRACE_DEVICE_ATTR(end_lba);

static struct attribute *blk_trace_attrs[] = {
	&dev_attr_enable.attr,
	&dev_attr_act_mask.attr,
	&dev_attr_pid.attr,
	&dev_attr_start_lba.attr,
	&dev_attr_end_lba.attr,
	NULL
};

struct attribute_group blk_trace_attr_group = {
	.name  = "trace",
	.attrs = blk_trace_attrs,
};
static const struct {
	int mask;
	const char *str;
} mask_maps[] = {
	{ BLK_TC_READ,		"read"		},
	{ BLK_TC_WRITE,		"write"		},
	{ BLK_TC_BARRIER,	"barrier"	},
	{ BLK_TC_SYNC,		"sync"		},
	{ BLK_TC_QUEUE,		"queue"		},
	{ BLK_TC_REQUEUE,	"requeue"	},
	{ BLK_TC_ISSUE,		"issue"		},
	{ BLK_TC_COMPLETE,	"complete"	},
	{ BLK_TC_FS,		"fs"		},
	{ BLK_TC_PC,		"pc"		},
	{ BLK_TC_AHEAD,		"ahead"		},
	{ BLK_TC_META,		"meta"		},
	{ BLK_TC_DISCARD,	"discard"	},
	{ BLK_TC_DRV_DATA,	"drv_data"	},
};
static int blk_trace_str2mask(const char *str)
{
	int i;
	int mask = 0;
	char *buf, *s, *token;

	buf = kstrdup(str, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;
	s = strstrip(buf);

	while (1) {
		token = strsep(&s, ",");
		if (token == NULL)
			break;

		if (*token == '\0')
			continue;

		for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
			if (strcasecmp(token, mask_maps[i].str) == 0) {
				mask |= mask_maps[i].mask;
				break;
			}
		}
		if (i == ARRAY_SIZE(mask_maps)) {
			mask = -EINVAL;
			break;
		}
	}
	kfree(buf);

	return mask;
}

static ssize_t blk_trace_mask2str(char *buf, int mask)
{
	int i;
	char *p = buf;

	for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
		if (mask & mask_maps[i].mask) {
			p += sprintf(p, "%s%s",
				    (p == buf) ? "" : ",", mask_maps[i].str);
		}
	}
	*p++ = '\n';

	return p - buf;
}
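
/*
 * Example round trip (illustrative only):
 *
 *	blk_trace_str2mask("read,write,barrier")
 *		== BLK_TC_READ | BLK_TC_WRITE | BLK_TC_BARRIER
 *
 * and blk_trace_mask2str() renders that mask back as the string
 * "read,write,barrier\n" for the sysfs act_mask attribute.  A token that
 * matches no entry in mask_maps[] makes blk_trace_str2mask() return
 * -EINVAL.
 */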
static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
{
	if (bdev->bd_disk == NULL)
		return NULL;

	return bdev_get_queue(bdev);
}

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct hd_struct *p = dev_to_part(dev);
	struct request_queue *q;
	struct block_device *bdev;
	ssize_t ret = -ENXIO;

	lock_kernel();
	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out_unlock_kernel;

	q = blk_trace_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&bdev->bd_mutex);

	if (attr == &dev_attr_enable) {
		ret = sprintf(buf, "%u\n", !!q->blk_trace);
		goto out_unlock_bdev;
	}

	if (q->blk_trace == NULL)
		ret = sprintf(buf, "disabled\n");
	else if (attr == &dev_attr_act_mask)
		ret = blk_trace_mask2str(buf, q->blk_trace->act_mask);
	else if (attr == &dev_attr_pid)
		ret = sprintf(buf, "%u\n", q->blk_trace->pid);
	else if (attr == &dev_attr_start_lba)
		ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
	else if (attr == &dev_attr_end_lba)
		ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);

out_unlock_bdev:
	mutex_unlock(&bdev->bd_mutex);
out_bdput:
	bdput(bdev);
out_unlock_kernel:
	unlock_kernel();
	return ret;
}
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct block_device *bdev;
	struct request_queue *q;
	struct hd_struct *p;
	u64 value;
	ssize_t ret = -EINVAL;

	if (count == 0)
		goto out;

	if (attr == &dev_attr_act_mask) {
		if (sscanf(buf, "%llx", &value) != 1) {
			/* Assume it is a list of trace category names */
			ret = blk_trace_str2mask(buf);
			if (ret < 0)
				goto out;
			value = ret;
		}
	} else if (sscanf(buf, "%llu", &value) != 1)
		goto out;

	ret = -ENXIO;

	lock_kernel();
	p = dev_to_part(dev);
	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out_unlock_kernel;

	q = blk_trace_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&bdev->bd_mutex);

	if (attr == &dev_attr_enable) {
		if (value)
			ret = blk_trace_setup_queue(q, bdev);
		else
			ret = blk_trace_remove_queue(q);
		goto out_unlock_bdev;
	}

	ret = 0;
	if (q->blk_trace == NULL)
		ret = blk_trace_setup_queue(q, bdev);

	if (ret == 0) {
		if (attr == &dev_attr_act_mask)
			q->blk_trace->act_mask = value;
		else if (attr == &dev_attr_pid)
			q->blk_trace->pid = value;
		else if (attr == &dev_attr_start_lba)
			q->blk_trace->start_lba = value;
		else if (attr == &dev_attr_end_lba)
			q->blk_trace->end_lba = value;
	}

out_unlock_bdev:
	mutex_unlock(&bdev->bd_mutex);
out_bdput:
	bdput(bdev);
out_unlock_kernel:
	unlock_kernel();
out:
	return ret ? ret : count;
}
int blk_trace_init_sysfs(struct device *dev)
{
	return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
}
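
/*
 * Example sysfs usage (a sketch; exact paths depend on the device):
 *
 *	echo read,write > /sys/block/sda/trace/act_mask
 *	echo 4813       > /sys/block/sda/trace/pid
 *	echo 1          > /sys/block/sda/trace/enable
 *
 * then select the "blk" tracer as current_tracer in debugfs and read the
 * events through the ftrace interface instead of the blktrace(8) relay
 * channels.
 */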