/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/time.h>
#include <linux/uaccess.h>

#include <trace/events/block.h>

#include "trace_output.h"

#ifdef CONFIG_BLK_DEV_IO_TRACE

static unsigned int blktrace_seq __read_mostly = 1;

static struct trace_array *blk_tr;
static bool blk_tracer_enabled __read_mostly;

/* Select an alternative, minimalistic output format instead of the original one */
#define TRACE_BLK_OPT_CLASSIC   0x1

static struct tracer_opt blk_tracer_opts[] = {
        /* Disable the minimalistic output by default */
        { TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
        { }
};

static struct tracer_flags blk_tracer_flags = {
        .val  = 0,
        .opts = blk_tracer_opts,
};

/* Global reference count of probes */
static atomic_t blk_probes_ref = ATOMIC_INIT(0);

static void blk_register_tracepoints(void);
static void blk_unregister_tracepoints(void);

/*
 * Send out a notify message.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
                       const void *data, size_t len)
{
        struct blk_io_trace *t;
        struct ring_buffer_event *event = NULL;
        int pc = 0;
        int cpu = smp_processor_id();
        bool blk_tracer = blk_tracer_enabled;

        if (blk_tracer) {
                pc = preempt_count();
                event = trace_buffer_lock_reserve(blk_tr, TRACE_BLK,
                                                  sizeof(*t) + len,
                                                  0, pc);
                if (!event)
                        return;
                t = ring_buffer_event_data(event);
                goto record_it;
        }

        if (!bt->rchan)
                return;

        t = relay_reserve(bt->rchan, sizeof(*t) + len);
        if (t) {
                t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
                t->time = ktime_to_ns(ktime_get());
record_it:
                t->device = bt->dev;
                t->action = action;
                t->pid = pid;
                t->cpu = cpu;
                t->pdu_len = len;
                memcpy((void *) t + sizeof(*t), data, len);

                if (blk_tracer)
                        trace_buffer_unlock_commit(blk_tr, event, 0, pc);
        }
}

/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started
 */
static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
{
        tsk->btrace_seq = blktrace_seq;
        trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
}

static void trace_note_time(struct blk_trace *bt)
{
        struct timespec now;
        unsigned long flags;
        u32 words[2];

        getnstimeofday(&now);
        words[0] = now.tv_sec;
        words[1] = now.tv_nsec;

        local_irq_save(flags);
        trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words));
        local_irq_restore(flags);
}

void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
{
        int n;
        va_list args;
        unsigned long flags;
        char *buf;

        if (unlikely(bt->trace_state != Blktrace_running &&
                     !blk_tracer_enabled))
                return;

        local_irq_save(flags);
        buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
        va_start(args, fmt);
        n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
        va_end(args);

        trace_note(bt, 0, BLK_TN_MESSAGE, buf, n);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__trace_note_message);

static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
                         pid_t pid)
{
        if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
                return 1;
        if (sector && (sector < bt->start_lba || sector > bt->end_lba))
                return 1;
        if (bt->pid && pid != bt->pid)
                return 1;

        return 0;
}

/*
 * Data direction bit lookup
 */
static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
                                 BLK_TC_ACT(BLK_TC_WRITE) };

/* The ilog2() calls fall out because they're constant */
#define MASK_TC_BIT(rw, __name) ((rw & (1 << BIO_RW_ ## __name)) << \
          (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - BIO_RW_ ## __name))
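
/*
 * A worked example (illustrative only): MASK_TC_BIT(rw, META) takes the
 * BIO_RW_META bit out of @rw and shifts it into the BLK_TC_META position
 * of the action word, i.e. it is equivalent to
 *
 *   (rw & (1 << BIO_RW_META)) ? BLK_TC_ACT(BLK_TC_META) : 0
 *
 * but compiles down to a single shift and mask, since ilog2() of a
 * constant is itself constant.
 */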

/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
                     int rw, u32 what, int error, int pdu_len, void *pdu_data)
{
        struct task_struct *tsk = current;
        struct ring_buffer_event *event = NULL;
        struct blk_io_trace *t;
        unsigned long flags = 0;
        unsigned long *sequence;
        pid_t pid;
        int cpu, pc = 0;
        bool blk_tracer = blk_tracer_enabled;

        if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
                return;

        what |= ddir_act[rw & WRITE];
        what |= MASK_TC_BIT(rw, BARRIER);
        what |= MASK_TC_BIT(rw, SYNCIO);
        what |= MASK_TC_BIT(rw, AHEAD);
        what |= MASK_TC_BIT(rw, META);
        what |= MASK_TC_BIT(rw, DISCARD);

        pid = tsk->pid;
        if (act_log_check(bt, what, sector, pid))
                return;
        cpu = raw_smp_processor_id();

        if (blk_tracer) {
                tracing_record_cmdline(current);

                pc = preempt_count();
                event = trace_buffer_lock_reserve(blk_tr, TRACE_BLK,
                                                  sizeof(*t) + pdu_len,
                                                  0, pc);
                if (!event)
                        return;
                t = ring_buffer_event_data(event);
                goto record_it;
        }

        /*
         * A word about the locking here - we disable interrupts to reserve
         * some space in the relay per-cpu buffer, to prevent an irq
         * from coming in and stepping on our toes.
         */
        local_irq_save(flags);

        if (unlikely(tsk->btrace_seq != blktrace_seq))
                trace_note_tsk(bt, tsk);

        t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
        if (t) {
                sequence = per_cpu_ptr(bt->sequence, cpu);

                t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
                t->sequence = ++(*sequence);
                t->time = ktime_to_ns(ktime_get());
record_it:
                /*
                 * These two are not needed in ftrace as they are in the
                 * generic trace_entry, filled by tracing_generic_entry_update,
                 * but for the trace_event->bin() synthesizer benefit we do it
                 * here too.
                 */
                t->cpu = cpu;
                t->pid = pid;

                t->sector = sector;
                t->bytes = bytes;
                t->action = what;
                t->device = bt->dev;
                t->error = error;
                t->pdu_len = pdu_len;

                if (pdu_len)
                        memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);

                if (blk_tracer) {
                        trace_buffer_unlock_commit(blk_tr, event, 0, pc);
                        return;
                }
        }

        local_irq_restore(flags);
}
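
/*
 * For reference, a sketch of the record layout the code above emits
 * (not a new definition): each record is a struct blk_io_trace header
 * immediately followed by pdu_len bytes of payload in the same
 * reservation, so a reader can walk a buffer with
 *
 *   next = (void *)t + sizeof(*t) + t->pdu_len;
 */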

static struct dentry *blk_tree_root;
static DEFINE_MUTEX(blk_tree_mutex);

static void blk_trace_free(struct blk_trace *bt)
{
        debugfs_remove(bt->msg_file);
        debugfs_remove(bt->dropped_file);
        debugfs_remove(bt->dir);
        relay_close(bt->rchan);
        free_percpu(bt->sequence);
        free_percpu(bt->msg_data);
        kfree(bt);
}

static void blk_trace_cleanup(struct blk_trace *bt)
{
        blk_trace_free(bt);
        if (atomic_dec_and_test(&blk_probes_ref))
                blk_unregister_tracepoints();
}

int blk_trace_remove(struct request_queue *q)
{
        struct blk_trace *bt;

        bt = xchg(&q->blk_trace, NULL);
        if (!bt)
                return -EINVAL;

        if (bt->trace_state != Blktrace_running)
                blk_trace_cleanup(bt);

        return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_remove);

static int blk_dropped_open(struct inode *inode, struct file *filp)
{
        filp->private_data = inode->i_private;

        return 0;
}

static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
                                size_t count, loff_t *ppos)
{
        struct blk_trace *bt = filp->private_data;
        char buf[16];

        snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));

        return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}

static const struct file_operations blk_dropped_fops = {
        .owner =        THIS_MODULE,
        .open =         blk_dropped_open,
        .read =         blk_dropped_read,
};

static int blk_msg_open(struct inode *inode, struct file *filp)
{
        filp->private_data = inode->i_private;

        return 0;
}

static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
                                size_t count, loff_t *ppos)
{
        char *msg;
        struct blk_trace *bt;

        if (count >= BLK_TN_MAX_MSG)
                return -EINVAL;

        msg = kmalloc(count + 1, GFP_KERNEL);
        if (msg == NULL)
                return -ENOMEM;

        if (copy_from_user(msg, buffer, count)) {
                kfree(msg);
                return -EFAULT;
        }

        msg[count] = '\0';
        bt = filp->private_data;
        __trace_note_message(bt, "%s", msg);
        kfree(msg);

        return count;
}

static const struct file_operations blk_msg_fops = {
        .owner =        THIS_MODULE,
        .open =         blk_msg_open,
        .write =        blk_msg_write,
};

/*
 * Keep track of how many times we encountered a full subbuffer, to aid
 * the user space app in telling how many lost events there were.
 */
static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
                                     void *prev_subbuf, size_t prev_padding)
{
        struct blk_trace *bt;

        if (!relay_buf_full(buf))
                return 1;

        bt = buf->chan->private_data;
        atomic_inc(&bt->dropped);
        return 0;
}
static int blk_remove_buf_file_callback(struct dentry *dentry)
{
        struct dentry *parent = dentry->d_parent;
        debugfs_remove(dentry);

        /*
         * This will fail for all but the last file, but that is OK: what we
         * care about is the top-level buts->name directory going away when
         * the last trace file is gone. Then we don't have to rmdir() it
         * manually on trace stop, which nicely solves the issue of
         * force-killing running traces.
         */

        debugfs_remove(parent);
        return 0;
}

static struct dentry *blk_create_buf_file_callback(const char *filename,
                                                   struct dentry *parent,
                                                   int mode,
                                                   struct rchan_buf *buf,
                                                   int *is_global)
{
        return debugfs_create_file(filename, mode, parent, buf,
                                        &relay_file_operations);
}

static struct rchan_callbacks blk_relay_callbacks = {
        .subbuf_start           = blk_subbuf_start_callback,
        .create_buf_file        = blk_create_buf_file_callback,
        .remove_buf_file        = blk_remove_buf_file_callback,
};

static void blk_trace_setup_lba(struct blk_trace *bt,
                                struct block_device *bdev)
{
        struct hd_struct *part = NULL;

        if (bdev)
                part = bdev->bd_part;

        if (part) {
                bt->start_lba = part->start_sect;
                bt->end_lba = part->start_sect + part->nr_sects;
        } else {
                bt->start_lba = 0;
                bt->end_lba = -1ULL;
        }
}

/*
 * Setup everything required to start tracing
 */
int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
                       struct block_device *bdev,
                       struct blk_user_trace_setup *buts)
{
        struct blk_trace *old_bt, *bt = NULL;
        struct dentry *dir = NULL;
        int ret, i;

        if (!buts->buf_size || !buts->buf_nr)
                return -EINVAL;

        strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
        buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';

        /*
         * Some device names contain path separators - convert the slashes
         * to underscores so the name works as a debugfs directory name
         */
        for (i = 0; i < strlen(buts->name); i++)
                if (buts->name[i] == '/')
                        buts->name[i] = '_';

        bt = kzalloc(sizeof(*bt), GFP_KERNEL);
        if (!bt)
                return -ENOMEM;

        ret = -ENOMEM;
        bt->sequence = alloc_percpu(unsigned long);
        if (!bt->sequence)
                goto err;

        bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
        if (!bt->msg_data)
                goto err;

        ret = -ENOENT;

        mutex_lock(&blk_tree_mutex);
        if (!blk_tree_root) {
                blk_tree_root = debugfs_create_dir("block", NULL);
                if (!blk_tree_root) {
                        mutex_unlock(&blk_tree_mutex);
                        goto err;
                }
        }
        mutex_unlock(&blk_tree_mutex);

        dir = debugfs_create_dir(buts->name, blk_tree_root);

        if (!dir)
                goto err;

        bt->dir = dir;
        bt->dev = dev;
        atomic_set(&bt->dropped, 0);

        ret = -EIO;
        bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
                                               &blk_dropped_fops);
        if (!bt->dropped_file)
                goto err;

        bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
        if (!bt->msg_file)
                goto err;

        bt->rchan = relay_open("trace", dir, buts->buf_size,
                                buts->buf_nr, &blk_relay_callbacks, bt);
        if (!bt->rchan)
                goto err;

        bt->act_mask = buts->act_mask;
        if (!bt->act_mask)
                bt->act_mask = (u16) -1;

        blk_trace_setup_lba(bt, bdev);

        /* overwrite with user settings */
        if (buts->start_lba)
                bt->start_lba = buts->start_lba;
        if (buts->end_lba)
                bt->end_lba = buts->end_lba;

        bt->pid = buts->pid;
        bt->trace_state = Blktrace_setup;

        ret = -EBUSY;
        old_bt = xchg(&q->blk_trace, bt);
        if (old_bt) {
                (void) xchg(&q->blk_trace, old_bt);
                goto err;
        }

        if (atomic_inc_return(&blk_probes_ref) == 1)
                blk_register_tracepoints();

        return 0;
err:
        blk_trace_free(bt);
        return ret;
}
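
/*
 * Resulting debugfs layout, assuming debugfs is mounted at
 * /sys/kernel/debug (a sketch; the names come from the code above):
 *
 *   /sys/kernel/debug/block/<name>/dropped     - blk_dropped_fops
 *   /sys/kernel/debug/block/<name>/msg         - blk_msg_fops
 *   /sys/kernel/debug/block/<name>/trace<cpu>  - relay per-cpu buffers
 */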

int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
                    struct block_device *bdev,
                    char __user *arg)
{
        struct blk_user_trace_setup buts;
        int ret;

        ret = copy_from_user(&buts, arg, sizeof(buts));
        if (ret)
                return -EFAULT;

        ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
        if (ret)
                return ret;

        if (copy_to_user(arg, &buts, sizeof(buts)))
                return -EFAULT;

        return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_setup);

int blk_trace_startstop(struct request_queue *q, int start)
{
        int ret;
        struct blk_trace *bt = q->blk_trace;

        if (bt == NULL)
                return -EINVAL;

        /*
         * For starting a trace, we can transition from a setup or stopped
         * trace. For stopping a trace, the state must be running
         */
        ret = -EINVAL;
        if (start) {
                if (bt->trace_state == Blktrace_setup ||
                    bt->trace_state == Blktrace_stopped) {
                        blktrace_seq++;
                        smp_mb();
                        bt->trace_state = Blktrace_running;

                        trace_note_time(bt);
                        ret = 0;
                }
        } else {
                if (bt->trace_state == Blktrace_running) {
                        bt->trace_state = Blktrace_stopped;
                        relay_flush(bt->rchan);
                        ret = 0;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_startstop);

/**
 * blk_trace_ioctl: - handle the ioctls associated with tracing
 * @bdev:       the block device
 * @cmd:        the ioctl cmd
 * @arg:        the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
        struct request_queue *q;
        int ret, start = 0;
        char b[BDEVNAME_SIZE];

        q = bdev_get_queue(bdev);
        if (!q)
                return -ENXIO;

        mutex_lock(&bdev->bd_mutex);

        switch (cmd) {
        case BLKTRACESETUP:
                bdevname(bdev, b);
                ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
                break;
        case BLKTRACESTART:
                start = 1;
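                /* fall through */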
        case BLKTRACESTOP:
                ret = blk_trace_startstop(q, start);
                break;
        case BLKTRACETEARDOWN:
                ret = blk_trace_remove(q);
                break;
        default:
                ret = -ENOTTY;
                break;
        }

        mutex_unlock(&bdev->bd_mutex);
        return ret;
}
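
/*
 * A hedged userspace sketch (not part of this file) of how the ioctls
 * above are typically driven; BLKTRACE* and struct blk_user_trace_setup
 * come from <linux/blktrace_api.h>:
 *
 *   struct blk_user_trace_setup buts = {
 *           .buf_size = 512 * 1024,     // bytes per relay subbuffer
 *           .buf_nr   = 4,              // number of subbuffers
 *   };
 *   int fd = open("/dev/sda", O_RDONLY);
 *   ioctl(fd, BLKTRACESETUP, &buts);    // creates the debugfs files
 *   ioctl(fd, BLKTRACESTART);           // state -> Blktrace_running
 *   ...                                 // read the relay "trace<cpu>" files
 *   ioctl(fd, BLKTRACESTOP);            // state -> Blktrace_stopped
 *   ioctl(fd, BLKTRACETEARDOWN);        // blk_trace_remove()
 */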

/**
 * blk_trace_shutdown: - stop and cleanup trace structures
 * @q:    the request queue associated with the device
 *
 **/
void blk_trace_shutdown(struct request_queue *q)
{
        if (q->blk_trace) {
                blk_trace_startstop(q, 0);
                blk_trace_remove(q);
        }
}

/*
 * blktrace probes
 */

/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @q:          queue the io is for
 * @rq:         the source request
 * @what:       the action
 *
 * Description:
 *     Records an action against a request. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
                                    u32 what)
{
        struct blk_trace *bt = q->blk_trace;
        int rw = rq->cmd_flags & 0x03;

        if (likely(!bt))
                return;

        if (blk_discard_rq(rq))
                rw |= (1 << BIO_RW_DISCARD);

        if (blk_pc_request(rq)) {
                what |= BLK_TC_ACT(BLK_TC_PC);
                __blk_add_trace(bt, 0, blk_rq_bytes(rq), rw,
                                what, rq->errors, rq->cmd_len, rq->cmd);
        } else {
                what |= BLK_TC_ACT(BLK_TC_FS);
                __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), rw,
                                what, rq->errors, 0, NULL);
        }
}

static void blk_add_trace_rq_abort(struct request_queue *q, struct request *rq)
{
        blk_add_trace_rq(q, rq, BLK_TA_ABORT);
}

static void blk_add_trace_rq_insert(struct request_queue *q, struct request *rq)
{
        blk_add_trace_rq(q, rq, BLK_TA_INSERT);
}

static void blk_add_trace_rq_issue(struct request_queue *q, struct request *rq)
{
        blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
}

static void blk_add_trace_rq_requeue(struct request_queue *q,
                                     struct request *rq)
{
        blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
}

static void blk_add_trace_rq_complete(struct request_queue *q,
                                      struct request *rq)
{
        blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
}

/**
 * blk_add_trace_bio - Add a trace for a bio oriented action
 * @q:          queue the io is for
 * @bio:        the source bio
 * @what:       the action
 *
 * Description:
 *     Records an action against a bio. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
                                     u32 what)
{
        struct blk_trace *bt = q->blk_trace;

        if (likely(!bt))
                return;

        __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
                        !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
}

static void blk_add_trace_bio_bounce(struct request_queue *q, struct bio *bio)
{
        blk_add_trace_bio(q, bio, BLK_TA_BOUNCE);
}

static void blk_add_trace_bio_complete(struct request_queue *q, struct bio *bio)
{
        blk_add_trace_bio(q, bio, BLK_TA_COMPLETE);
}

static void blk_add_trace_bio_backmerge(struct request_queue *q,
                                        struct bio *bio)
{
        blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
}

static void blk_add_trace_bio_frontmerge(struct request_queue *q,
                                         struct bio *bio)
{
        blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
}

static void blk_add_trace_bio_queue(struct request_queue *q, struct bio *bio)
{
        blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
}

static void blk_add_trace_getrq(struct request_queue *q,
                                struct bio *bio, int rw)
{
        if (bio)
                blk_add_trace_bio(q, bio, BLK_TA_GETRQ);
        else {
                struct blk_trace *bt = q->blk_trace;

                if (bt)
                        __blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL);
        }
}

static void blk_add_trace_sleeprq(struct request_queue *q,
                                  struct bio *bio, int rw)
{
        if (bio)
                blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ);
        else {
                struct blk_trace *bt = q->blk_trace;

                if (bt)
                        __blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ,
                                        0, 0, NULL);
        }
}

static void blk_add_trace_plug(struct request_queue *q)
{
        struct blk_trace *bt = q->blk_trace;

        if (bt)
                __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
}

static void blk_add_trace_unplug_io(struct request_queue *q)
{
        struct blk_trace *bt = q->blk_trace;

        if (bt) {
                unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
                __be64 rpdu = cpu_to_be64(pdu);

                __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
                                sizeof(rpdu), &rpdu);
        }
}

static void blk_add_trace_unplug_timer(struct request_queue *q)
{
        struct blk_trace *bt = q->blk_trace;

        if (bt) {
                unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
                __be64 rpdu = cpu_to_be64(pdu);

                __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_TIMER, 0,
                                sizeof(rpdu), &rpdu);
        }
}

static void blk_add_trace_split(struct request_queue *q, struct bio *bio,
                                unsigned int pdu)
{
        struct blk_trace *bt = q->blk_trace;

        if (bt) {
                __be64 rpdu = cpu_to_be64(pdu);

                __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
                                BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE),
                                sizeof(rpdu), &rpdu);
        }
}

/**
 * blk_add_trace_remap - Add a trace for a remap operation
 * @q:          queue the io is for
 * @bio:        the source bio
 * @dev:        target device
 * @from:       source sector
 *
 * Description:
 *     Device mapper or raid targets sometimes need to split a bio because
 *     it spans a stripe (or similar). Add a trace for that action.
 *
 **/
static void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
                                       dev_t dev, sector_t from)
{
        struct blk_trace *bt = q->blk_trace;
        struct blk_io_trace_remap r;

        if (likely(!bt))
                return;

        r.device_from = cpu_to_be32(dev);
        r.device_to   = cpu_to_be32(bio->bi_bdev->bd_dev);
        r.sector_from = cpu_to_be64(from);

        __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
                        BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE),
                        sizeof(r), &r);
}

/**
 * blk_add_driver_data - Add binary message with driver-specific data
 * @q:          queue the io is for
 * @rq:         io request
 * @data:       driver-specific data
 * @len:        length of driver-specific data
 *
 * Description:
 *     Some drivers might want to write driver-specific data per request.
 *
 **/
void blk_add_driver_data(struct request_queue *q,
                         struct request *rq,
                         void *data, size_t len)
{
        struct blk_trace *bt = q->blk_trace;

        if (likely(!bt))
                return;

        if (blk_pc_request(rq))
                __blk_add_trace(bt, 0, blk_rq_bytes(rq), 0,
                                BLK_TA_DRV_DATA, rq->errors, len, data);
        else
                __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 0,
                                BLK_TA_DRV_DATA, rq->errors, len, data);
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);

static void blk_register_tracepoints(void)
{
        int ret;

        ret = register_trace_block_rq_abort(blk_add_trace_rq_abort);
        WARN_ON(ret);
        ret = register_trace_block_rq_insert(blk_add_trace_rq_insert);
        WARN_ON(ret);
        ret = register_trace_block_rq_issue(blk_add_trace_rq_issue);
        WARN_ON(ret);
        ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue);
        WARN_ON(ret);
        ret = register_trace_block_rq_complete(blk_add_trace_rq_complete);
        WARN_ON(ret);
        ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce);
        WARN_ON(ret);
        ret = register_trace_block_bio_complete(blk_add_trace_bio_complete);
        WARN_ON(ret);
        ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
        WARN_ON(ret);
        ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
        WARN_ON(ret);
        ret = register_trace_block_bio_queue(blk_add_trace_bio_queue);
        WARN_ON(ret);
        ret = register_trace_block_getrq(blk_add_trace_getrq);
        WARN_ON(ret);
        ret = register_trace_block_sleeprq(blk_add_trace_sleeprq);
        WARN_ON(ret);
        ret = register_trace_block_plug(blk_add_trace_plug);
        WARN_ON(ret);
        ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer);
        WARN_ON(ret);
        ret = register_trace_block_unplug_io(blk_add_trace_unplug_io);
        WARN_ON(ret);
        ret = register_trace_block_split(blk_add_trace_split);
        WARN_ON(ret);
        ret = register_trace_block_remap(blk_add_trace_remap);
        WARN_ON(ret);
}

static void blk_unregister_tracepoints(void)
{
        unregister_trace_block_remap(blk_add_trace_remap);
        unregister_trace_block_split(blk_add_trace_split);
        unregister_trace_block_unplug_io(blk_add_trace_unplug_io);
        unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer);
        unregister_trace_block_plug(blk_add_trace_plug);
        unregister_trace_block_sleeprq(blk_add_trace_sleeprq);
        unregister_trace_block_getrq(blk_add_trace_getrq);
        unregister_trace_block_bio_queue(blk_add_trace_bio_queue);
        unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
        unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
        unregister_trace_block_bio_complete(blk_add_trace_bio_complete);
        unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce);
        unregister_trace_block_rq_complete(blk_add_trace_rq_complete);
        unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue);
        unregister_trace_block_rq_issue(blk_add_trace_rq_issue);
        unregister_trace_block_rq_insert(blk_add_trace_rq_insert);
        unregister_trace_block_rq_abort(blk_add_trace_rq_abort);

        tracepoint_synchronize_unregister();
}

/*
 * struct blk_io_tracer formatting routines
 */

static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
{
        int i = 0;
        int tc = t->action >> BLK_TC_SHIFT;

        if (t->action == BLK_TN_MESSAGE) {
                rwbs[i++] = 'N';
                goto out;
        }

        if (tc & BLK_TC_DISCARD)
                rwbs[i++] = 'D';
        else if (tc & BLK_TC_WRITE)
                rwbs[i++] = 'W';
        else if (t->bytes)
                rwbs[i++] = 'R';
        else
                rwbs[i++] = 'N';

        if (tc & BLK_TC_AHEAD)
                rwbs[i++] = 'A';
        if (tc & BLK_TC_BARRIER)
                rwbs[i++] = 'B';
        if (tc & BLK_TC_SYNC)
                rwbs[i++] = 'S';
        if (tc & BLK_TC_META)
                rwbs[i++] = 'M';
out:
        rwbs[i] = '\0';
}
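
/*
 * For example (illustrative): a synchronous metadata write fills in
 * "WSM", while a notify message always yields just "N".
 */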

static inline
const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
{
        return (const struct blk_io_trace *)ent;
}

static inline const void *pdu_start(const struct trace_entry *ent)
{
        return te_blk_io_trace(ent) + 1;
}

static inline u32 t_action(const struct trace_entry *ent)
{
        return te_blk_io_trace(ent)->action;
}

static inline u32 t_bytes(const struct trace_entry *ent)
{
        return te_blk_io_trace(ent)->bytes;
}

static inline u32 t_sec(const struct trace_entry *ent)
{
        return te_blk_io_trace(ent)->bytes >> 9;
}

static inline unsigned long long t_sector(const struct trace_entry *ent)
{
        return te_blk_io_trace(ent)->sector;
}

static inline __u16 t_error(const struct trace_entry *ent)
{
        return te_blk_io_trace(ent)->error;
}

static __u64 get_pdu_int(const struct trace_entry *ent)
{
        const __u64 *val = pdu_start(ent);
        return be64_to_cpu(*val);
}

static void get_pdu_remap(const struct trace_entry *ent,
                          struct blk_io_trace_remap *r)
{
        const struct blk_io_trace_remap *__r = pdu_start(ent);
        __u64 sector_from = __r->sector_from;

        r->device_from = be32_to_cpu(__r->device_from);
        r->device_to   = be32_to_cpu(__r->device_to);
        r->sector_from = be64_to_cpu(sector_from);
}

typedef int (blk_log_action_t) (struct trace_iterator *iter, const char *act);

static int blk_log_action_classic(struct trace_iterator *iter, const char *act)
{
        char rwbs[6];
        unsigned long long ts  = iter->ts;
        unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
        unsigned secs          = (unsigned long)ts;
        const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

        fill_rwbs(rwbs, t);

        return trace_seq_printf(&iter->seq,
                                "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
                                MAJOR(t->device), MINOR(t->device), iter->cpu,
                                secs, nsec_rem, iter->ent->pid, act, rwbs);
}
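
/*
 * A sample classic-format line for illustration (device 8,0, CPU 3,
 * pid 1234, a queued synchronous write), matching the format string
 * above and followed by the per-action payload from the blk_log_*()
 * routines:
 *
 *   8,0     3     5.123456789  1234  Q  WS ...
 */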

static int blk_log_action(struct trace_iterator *iter, const char *act)
{
        char rwbs[6];
        const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

        fill_rwbs(rwbs, t);
        return trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
                                MAJOR(t->device), MINOR(t->device), act, rwbs);
}

static int blk_log_dump_pdu(struct trace_seq *s, const struct trace_entry *ent)
{
        const unsigned char *pdu_buf;
        int pdu_len;
        int i, end, ret;

        pdu_buf = pdu_start(ent);
        pdu_len = te_blk_io_trace(ent)->pdu_len;

        if (!pdu_len)
                return 1;

        /* find where the trailing zeroes start; all but the first are elided */
        for (end = pdu_len - 1; end >= 0; end--)
                if (pdu_buf[end])
                        break;
        end++;

        if (!trace_seq_putc(s, '('))
                return 0;

        for (i = 0; i < pdu_len; i++) {

                ret = trace_seq_printf(s, "%s%02x",
                                       i == 0 ? "" : " ", pdu_buf[i]);
                if (!ret)
                        return ret;

                /*
                 * stop when the rest is just zeroes and indicate so
                 * with a ".." appended
                 */
                if (i == end && end != pdu_len - 1)
                        return trace_seq_puts(s, " ..) ");
        }

        return trace_seq_puts(s, ") ");
}
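
/*
 * Example output (illustrative): a cdb whose only non-zero byte is the
 * opcode, e.g. 0x28, is shortened to "(28 00 ..) ", while one with no
 * trailing zeroes is printed in full.
 */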

static int blk_log_generic(struct trace_seq *s, const struct trace_entry *ent)
{
        char cmd[TASK_COMM_LEN];

        trace_find_cmdline(ent->pid, cmd);

        if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
                int ret;

                ret = trace_seq_printf(s, "%u ", t_bytes(ent));
                if (!ret)
                        return 0;
                ret = blk_log_dump_pdu(s, ent);
                if (!ret)
                        return 0;
                return trace_seq_printf(s, "[%s]\n", cmd);
        } else {
                if (t_sec(ent))
                        return trace_seq_printf(s, "%llu + %u [%s]\n",
                                                t_sector(ent), t_sec(ent), cmd);
                return trace_seq_printf(s, "[%s]\n", cmd);
        }
}

static int blk_log_with_error(struct trace_seq *s,
                              const struct trace_entry *ent)
{
        if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
                int ret;

                ret = blk_log_dump_pdu(s, ent);
                if (ret)
                        return trace_seq_printf(s, "[%d]\n", t_error(ent));
                return 0;
        } else {
                if (t_sec(ent))
                        return trace_seq_printf(s, "%llu + %u [%d]\n",
                                                t_sector(ent),
                                                t_sec(ent), t_error(ent));
                return trace_seq_printf(s, "%llu [%d]\n",
                                        t_sector(ent), t_error(ent));
        }
}

static int blk_log_remap(struct trace_seq *s, const struct trace_entry *ent)
{
        struct blk_io_trace_remap r = { .device_from = 0, };

        get_pdu_remap(ent, &r);
        return trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
                                t_sector(ent), t_sec(ent),
                                MAJOR(r.device_from), MINOR(r.device_from),
                                (unsigned long long)r.sector_from);
}

static int blk_log_plug(struct trace_seq *s, const struct trace_entry *ent)
{
        char cmd[TASK_COMM_LEN];

        trace_find_cmdline(ent->pid, cmd);

        return trace_seq_printf(s, "[%s]\n", cmd);
}

static int blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent)
{
        char cmd[TASK_COMM_LEN];

        trace_find_cmdline(ent->pid, cmd);

        return trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent));
}

static int blk_log_split(struct trace_seq *s, const struct trace_entry *ent)
{
        char cmd[TASK_COMM_LEN];

        trace_find_cmdline(ent->pid, cmd);

        return trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
                                get_pdu_int(ent), cmd);
}

static int blk_log_msg(struct trace_seq *s, const struct trace_entry *ent)
{
        int ret;
        const struct blk_io_trace *t = te_blk_io_trace(ent);

        ret = trace_seq_putmem(s, t + 1, t->pdu_len);
        if (ret)
                return trace_seq_putc(s, '\n');
        return ret;
}

/*
 * struct tracer operations
 */

static void blk_tracer_print_header(struct seq_file *m)
{
        if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
                return;
        seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
                    "#  |     |     |           |   |   |\n");
}

static void blk_tracer_start(struct trace_array *tr)
{
        blk_tracer_enabled = true;
}

static int blk_tracer_init(struct trace_array *tr)
{
        blk_tr = tr;
        blk_tracer_start(tr);
        return 0;
}

static void blk_tracer_stop(struct trace_array *tr)
{
        blk_tracer_enabled = false;
}

static void blk_tracer_reset(struct trace_array *tr)
{
        blk_tracer_stop(tr);
}

static const struct {
        const char *act[2];
        int        (*print)(struct trace_seq *s, const struct trace_entry *ent);
} what2act[] = {
        [__BLK_TA_QUEUE]        = {{  "Q", "queue" },      blk_log_generic },
        [__BLK_TA_BACKMERGE]    = {{  "M", "backmerge" },  blk_log_generic },
        [__BLK_TA_FRONTMERGE]   = {{  "F", "frontmerge" }, blk_log_generic },
        [__BLK_TA_GETRQ]        = {{  "G", "getrq" },      blk_log_generic },
        [__BLK_TA_SLEEPRQ]      = {{  "S", "sleeprq" },    blk_log_generic },
        [__BLK_TA_REQUEUE]      = {{  "R", "requeue" },    blk_log_with_error },
        [__BLK_TA_ISSUE]        = {{  "D", "issue" },      blk_log_generic },
        [__BLK_TA_COMPLETE]     = {{  "C", "complete" },   blk_log_with_error },
        [__BLK_TA_PLUG]         = {{  "P", "plug" },       blk_log_plug },
        [__BLK_TA_UNPLUG_IO]    = {{  "U", "unplug_io" },  blk_log_unplug },
        [__BLK_TA_UNPLUG_TIMER] = {{ "UT", "unplug_timer" }, blk_log_unplug },
        [__BLK_TA_INSERT]       = {{  "I", "insert" },     blk_log_generic },
        [__BLK_TA_SPLIT]        = {{  "X", "split" },      blk_log_split },
        [__BLK_TA_BOUNCE]       = {{  "B", "bounce" },     blk_log_generic },
        [__BLK_TA_REMAP]        = {{  "A", "remap" },      blk_log_remap },
};

static enum print_line_t print_one_line(struct trace_iterator *iter,
                                        bool classic)
{
        struct trace_seq *s = &iter->seq;
        const struct blk_io_trace *t;
        u16 what;
        int ret;
        bool long_act;
        blk_log_action_t *log_action;

        t          = te_blk_io_trace(iter->ent);
        what       = t->action & ((1 << BLK_TC_SHIFT) - 1);
        long_act   = !!(trace_flags & TRACE_ITER_VERBOSE);
        log_action = classic ? &blk_log_action_classic : &blk_log_action;

        if (t->action == BLK_TN_MESSAGE) {
                ret = log_action(iter, long_act ? "message" : "m");
                if (ret)
                        ret = blk_log_msg(s, iter->ent);
                goto out;
        }

        if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
                ret = trace_seq_printf(s, "Unknown action %x\n", what);
        else {
                ret = log_action(iter, what2act[what].act[long_act]);
                if (ret)
                        ret = what2act[what].print(s, iter->ent);
        }
out:
        return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
                                               int flags)
{
        return print_one_line(iter, false);
}

static int blk_trace_synthesize_old_trace(struct trace_iterator *iter)
{
        struct trace_seq *s = &iter->seq;
        struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
        const int offset = offsetof(struct blk_io_trace, sector);
        struct blk_io_trace old = {
                .magic    = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
                .time     = iter->ts,
        };

        if (!trace_seq_putmem(s, &old, offset))
                return 0;
        return trace_seq_putmem(s, &t->sector,
                                sizeof(old) - offset + t->pdu_len);
}

static enum print_line_t
blk_trace_event_print_binary(struct trace_iterator *iter, int flags)
{
        return blk_trace_synthesize_old_trace(iter) ?
                        TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
{
        if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
                return TRACE_TYPE_UNHANDLED;

        return print_one_line(iter, true);
}

static int blk_tracer_set_flag(u32 old_flags, u32 bit, int set)
{
        /* don't output context-info for blk_classic output */
        if (bit == TRACE_BLK_OPT_CLASSIC) {
                if (set)
                        trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
                else
                        trace_flags |= TRACE_ITER_CONTEXT_INFO;
        }
        return 0;
}

static struct tracer blk_tracer __read_mostly = {
        .name           = "blk",
        .init           = blk_tracer_init,
        .reset          = blk_tracer_reset,
        .start          = blk_tracer_start,
        .stop           = blk_tracer_stop,
        .print_header   = blk_tracer_print_header,
        .print_line     = blk_tracer_print_line,
        .flags          = &blk_tracer_flags,
        .set_flag       = blk_tracer_set_flag,
};

static struct trace_event trace_blk_event = {
        .type           = TRACE_BLK,
        .trace          = blk_trace_event_print,
        .binary         = blk_trace_event_print_binary,
};

static int __init init_blk_tracer(void)
{
        if (!register_ftrace_event(&trace_blk_event)) {
                pr_warning("Warning: could not register block events\n");
                return 1;
        }

        if (register_tracer(&blk_tracer) != 0) {
                pr_warning("Warning: could not register the block tracer\n");
                unregister_ftrace_event(&trace_blk_event);
                return 1;
        }

        return 0;
}

device_initcall(init_blk_tracer);

static int blk_trace_remove_queue(struct request_queue *q)
{
        struct blk_trace *bt;

        bt = xchg(&q->blk_trace, NULL);
        if (bt == NULL)
                return -EINVAL;

        if (atomic_dec_and_test(&blk_probes_ref))
                blk_unregister_tracepoints();

        blk_trace_free(bt);
        return 0;
}

/*
 * Setup everything required to start tracing
 */
static int blk_trace_setup_queue(struct request_queue *q,
                                 struct block_device *bdev)
{
        struct blk_trace *old_bt, *bt = NULL;
        int ret = -ENOMEM;

        bt = kzalloc(sizeof(*bt), GFP_KERNEL);
        if (!bt)
                return -ENOMEM;

        bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
        if (!bt->msg_data)
                goto free_bt;

        bt->dev = bdev->bd_dev;
        bt->act_mask = (u16)-1;

        blk_trace_setup_lba(bt, bdev);

        old_bt = xchg(&q->blk_trace, bt);
        if (old_bt != NULL) {
                (void)xchg(&q->blk_trace, old_bt);
                ret = -EBUSY;
                goto free_bt;
        }

        if (atomic_inc_return(&blk_probes_ref) == 1)
                blk_register_tracepoints();
        return 0;

free_bt:
        blk_trace_free(bt);
        return ret;
}

/*
 * sysfs interface to enable and configure tracing
 */

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf);
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
                                          struct device_attribute *attr,
                                          const char *buf, size_t count);
#define BLK_TRACE_DEVICE_ATTR(_name) \
        DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
                    sysfs_blk_trace_attr_show, \
                    sysfs_blk_trace_attr_store)

static BLK_TRACE_DEVICE_ATTR(enable);
static BLK_TRACE_DEVICE_ATTR(act_mask);
static BLK_TRACE_DEVICE_ATTR(pid);
static BLK_TRACE_DEVICE_ATTR(start_lba);
static BLK_TRACE_DEVICE_ATTR(end_lba);

static struct attribute *blk_trace_attrs[] = {
        &dev_attr_enable.attr,
        &dev_attr_act_mask.attr,
        &dev_attr_pid.attr,
        &dev_attr_start_lba.attr,
        &dev_attr_end_lba.attr,
        NULL
};

struct attribute_group blk_trace_attr_group = {
        .name  = "trace",
        .attrs = blk_trace_attrs,
};

static const struct {
        int mask;
        const char *str;
} mask_maps[] = {
        { BLK_TC_READ,          "read"          },
        { BLK_TC_WRITE,         "write"         },
        { BLK_TC_BARRIER,       "barrier"       },
        { BLK_TC_SYNC,          "sync"          },
        { BLK_TC_QUEUE,         "queue"         },
        { BLK_TC_REQUEUE,       "requeue"       },
        { BLK_TC_ISSUE,         "issue"         },
        { BLK_TC_COMPLETE,      "complete"      },
        { BLK_TC_FS,            "fs"            },
        { BLK_TC_PC,            "pc"            },
        { BLK_TC_AHEAD,         "ahead"         },
        { BLK_TC_META,          "meta"          },
        { BLK_TC_DISCARD,       "discard"       },
        { BLK_TC_DRV_DATA,      "drv_data"      },
};

static int blk_trace_str2mask(const char *str)
{
        int i;
        int mask = 0;
        char *buf, *s, *token;

        buf = kstrdup(str, GFP_KERNEL);
        if (buf == NULL)
                return -ENOMEM;
        s = strstrip(buf);

        while (1) {
                token = strsep(&s, ",");
                if (token == NULL)
                        break;

                if (*token == '\0')
                        continue;

                for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
                        if (strcasecmp(token, mask_maps[i].str) == 0) {
                                mask |= mask_maps[i].mask;
                                break;
                        }
                }
                if (i == ARRAY_SIZE(mask_maps)) {
                        mask = -EINVAL;
                        break;
                }
        }
        kfree(buf);

        return mask;
}

static ssize_t blk_trace_mask2str(char *buf, int mask)
{
        int i;
        char *p = buf;

        for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
                if (mask & mask_maps[i].mask) {
                        p += sprintf(p, "%s%s",
                                    (p == buf) ? "" : ",", mask_maps[i].str);
                }
        }
        *p++ = '\n';

        return p - buf;
}
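
/*
 * Usage sketch (paths assume the "trace" attribute group above is
 * attached under the block device's sysfs directory):
 *
 *   # echo "read,write,sync" > /sys/block/sda/trace/act_mask
 *   # cat /sys/block/sda/trace/act_mask
 *   read,write,sync
 *
 * A hex value such as "0xffff" is accepted as well, see
 * sysfs_blk_trace_attr_store() below.
 */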

static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
{
        if (bdev->bd_disk == NULL)
                return NULL;

        return bdev_get_queue(bdev);
}

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        struct hd_struct *p = dev_to_part(dev);
        struct request_queue *q;
        struct block_device *bdev;
        ssize_t ret = -ENXIO;

        lock_kernel();
        bdev = bdget(part_devt(p));
        if (bdev == NULL)
                goto out_unlock_kernel;

        q = blk_trace_get_queue(bdev);
        if (q == NULL)
                goto out_bdput;

        mutex_lock(&bdev->bd_mutex);

        if (attr == &dev_attr_enable) {
                ret = sprintf(buf, "%u\n", !!q->blk_trace);
                goto out_unlock_bdev;
        }

        if (q->blk_trace == NULL)
                ret = sprintf(buf, "disabled\n");
        else if (attr == &dev_attr_act_mask)
                ret = blk_trace_mask2str(buf, q->blk_trace->act_mask);
        else if (attr == &dev_attr_pid)
                ret = sprintf(buf, "%u\n", q->blk_trace->pid);
        else if (attr == &dev_attr_start_lba)
                ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
        else if (attr == &dev_attr_end_lba)
                ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);

out_unlock_bdev:
        mutex_unlock(&bdev->bd_mutex);
out_bdput:
        bdput(bdev);
out_unlock_kernel:
        unlock_kernel();
        return ret;
}

static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
                                          struct device_attribute *attr,
                                          const char *buf, size_t count)
{
        struct block_device *bdev;
        struct request_queue *q;
        struct hd_struct *p;
        u64 value;
        ssize_t ret = -EINVAL;

        if (count == 0)
                goto out;

        if (attr == &dev_attr_act_mask) {
                if (sscanf(buf, "%llx", &value) != 1) {
                        /* Assume it is a list of trace category names */
                        ret = blk_trace_str2mask(buf);
                        if (ret < 0)
                                goto out;
                        value = ret;
                }
        } else if (sscanf(buf, "%llu", &value) != 1)
                goto out;

        ret = -ENXIO;

        lock_kernel();
        p = dev_to_part(dev);
        bdev = bdget(part_devt(p));
        if (bdev == NULL)
                goto out_unlock_kernel;

        q = blk_trace_get_queue(bdev);
        if (q == NULL)
                goto out_bdput;

        mutex_lock(&bdev->bd_mutex);

        if (attr == &dev_attr_enable) {
                if (value)
                        ret = blk_trace_setup_queue(q, bdev);
                else
                        ret = blk_trace_remove_queue(q);
                goto out_unlock_bdev;
        }

        ret = 0;
        if (q->blk_trace == NULL)
                ret = blk_trace_setup_queue(q, bdev);

        if (ret == 0) {
                if (attr == &dev_attr_act_mask)
                        q->blk_trace->act_mask = value;
                else if (attr == &dev_attr_pid)
                        q->blk_trace->pid = value;
                else if (attr == &dev_attr_start_lba)
                        q->blk_trace->start_lba = value;
                else if (attr == &dev_attr_end_lba)
                        q->blk_trace->end_lba = value;
        }

out_unlock_bdev:
        mutex_unlock(&bdev->bd_mutex);
out_bdput:
        bdput(bdev);
out_unlock_kernel:
        unlock_kernel();
out:
        return ret ? ret : count;
}

int blk_trace_init_sysfs(struct device *dev)
{
        return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
}

#endif /* CONFIG_BLK_DEV_IO_TRACE */

#ifdef CONFIG_EVENT_TRACING

void blk_dump_cmd(char *buf, struct request *rq)
{
        int i, end;
        int len = rq->cmd_len;
        unsigned char *cmd = rq->cmd;

        if (!blk_pc_request(rq)) {
                buf[0] = '\0';
                return;
        }

        for (end = len - 1; end >= 0; end--)
                if (cmd[end])
                        break;
        end++;

        for (i = 0; i < len; i++) {
                buf += sprintf(buf, "%s%02x", i == 0 ? "" : " ", cmd[i]);
                if (i == end && end != len - 1) {
                        sprintf(buf, " ..");
                        break;
                }
        }
}

void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
{
        int i = 0;

        if (rw & WRITE)
                rwbs[i++] = 'W';
        else if (rw & 1 << BIO_RW_DISCARD)
                rwbs[i++] = 'D';
        else if (bytes)
                rwbs[i++] = 'R';
        else
                rwbs[i++] = 'N';

        if (rw & 1 << BIO_RW_AHEAD)
                rwbs[i++] = 'A';
        if (rw & 1 << BIO_RW_BARRIER)
                rwbs[i++] = 'B';
        if (rw & 1 << BIO_RW_SYNCIO)
                rwbs[i++] = 'S';
        if (rw & 1 << BIO_RW_META)
                rwbs[i++] = 'M';

        rwbs[i] = '\0';
}
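
/*
 * For example (illustrative): blk_fill_rwbs(rwbs, WRITE | (1 << BIO_RW_SYNCIO),
 * 4096) yields "WS", while a zero-byte request with no direction bits set
 * yields "N".
 */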

void blk_fill_rwbs_rq(char *rwbs, struct request *rq)
{
        int rw = rq->cmd_flags & 0x03;
        int bytes;

        if (blk_discard_rq(rq))
                rw |= (1 << BIO_RW_DISCARD);

        bytes = blk_rq_bytes(rq);

        blk_fill_rwbs(rwbs, rw, bytes);
}

#endif /* CONFIG_EVENT_TRACING */