kernel/trace/blktrace.c
/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/time.h>
#include <trace/block.h>
#include <linux/uaccess.h>
#include "trace_output.h"

static unsigned int blktrace_seq __read_mostly = 1;

static struct trace_array *blk_tr;
static bool blk_tracer_enabled __read_mostly;
/* Select an alternative, minimalistic output format instead of the original one */
#define TRACE_BLK_OPT_CLASSIC   0x1

static struct tracer_opt blk_tracer_opts[] = {
        /* The classic, minimalistic output is disabled by default */
        { TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
        { }
};

static struct tracer_flags blk_tracer_flags = {
        .val  = 0,
        .opts = blk_tracer_opts,
};

/* Global reference count of probes */
static atomic_t blk_probes_ref = ATOMIC_INIT(0);

static void blk_register_tracepoints(void);
static void blk_unregister_tracepoints(void);
/*
 * Send out a notify message, either into the ftrace ring buffer or the
 * relay channel, depending on which tracer is currently active.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
                       const void *data, size_t len)
{
        struct blk_io_trace *t;
        struct ring_buffer_event *event = NULL;
        int pc = 0;
        int cpu = smp_processor_id();
        bool blk_tracer = blk_tracer_enabled;

        if (blk_tracer) {
                pc = preempt_count();
                event = trace_buffer_lock_reserve(blk_tr, TRACE_BLK,
                                                  sizeof(*t) + len,
                                                  0, pc);
                if (!event)
                        return;
                t = ring_buffer_event_data(event);
                goto record_it;
        }

        if (!bt->rchan)
                return;

        t = relay_reserve(bt->rchan, sizeof(*t) + len);
        if (t) {
                t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
                t->time = ktime_to_ns(ktime_get());
record_it:
                t->device = bt->dev;
                t->action = action;
                t->pid = pid;
                t->cpu = cpu;
                t->pdu_len = len;
                memcpy((void *) t + sizeof(*t), data, len);

                if (blk_tracer)
                        trace_buffer_unlock_commit(blk_tr, event, 0, pc);
        }
}

/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started
 */
static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
{
        tsk->btrace_seq = blktrace_seq;
        trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
}

static void trace_note_time(struct blk_trace *bt)
{
        struct timespec now;
        unsigned long flags;
        u32 words[2];

        getnstimeofday(&now);
        words[0] = now.tv_sec;
        words[1] = now.tv_nsec;

        local_irq_save(flags);
        trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words));
        local_irq_restore(flags);
}

void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
{
        int n;
        va_list args;
        unsigned long flags;
        char *buf;

        if (unlikely(bt->trace_state != Blktrace_running &&
                     !blk_tracer_enabled))
                return;

        local_irq_save(flags);
        buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
        va_start(args, fmt);
        n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
        va_end(args);

        trace_note(bt, 0, BLK_TN_MESSAGE, buf, n);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__trace_note_message);

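/*
 * Example (a sketch): drivers normally reach this through the
 * blk_add_trace_msg() wrapper in <linux/blktrace_api.h>, which checks
 * q->blk_trace first, e.g.
 *   blk_add_trace_msg(q, "%s: queue stalled", disk->disk_name);
 * The message then shows up as a note ('m') event in blkparse output.
 */
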
/*
 * Per-trace filter: returns nonzero when an event should be dropped
 * because it fails the action mask, the [start_lba, end_lba] range
 * check, or the pid filter configured at setup time.
 */
static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
                         pid_t pid)
{
        if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
                return 1;
        if (sector < bt->start_lba || sector > bt->end_lba)
                return 1;
        if (bt->pid && pid != bt->pid)
                return 1;

        return 0;
}

/*
 * Data direction bit lookup
 */
static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
                                 BLK_TC_ACT(BLK_TC_WRITE) };

/* The ilog2() calls fall out because they're constant */
#define MASK_TC_BIT(rw, __name) ((rw & (1 << BIO_RW_ ## __name)) << \
          (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - BIO_RW_ ## __name))
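
/*
 * For example, MASK_TC_BIT(rw, META) isolates bit BIO_RW_META of @rw and
 * shifts it up to bit (ilog2(BLK_TC_META) + BLK_TC_SHIFT), i.e. to the
 * BLK_TC_ACT(BLK_TC_META) position of the action mask.
 */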

/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
                     int rw, u32 what, int error, int pdu_len, void *pdu_data)
{
        struct task_struct *tsk = current;
        struct ring_buffer_event *event = NULL;
        struct blk_io_trace *t;
        unsigned long flags = 0;
        unsigned long *sequence;
        pid_t pid;
        int cpu, pc = 0;
        bool blk_tracer = blk_tracer_enabled;

        if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
                return;

        what |= ddir_act[rw & WRITE];
        what |= MASK_TC_BIT(rw, BARRIER);
        what |= MASK_TC_BIT(rw, SYNCIO);
        what |= MASK_TC_BIT(rw, AHEAD);
        what |= MASK_TC_BIT(rw, META);
        what |= MASK_TC_BIT(rw, DISCARD);

        pid = tsk->pid;
        if (unlikely(act_log_check(bt, what, sector, pid)))
                return;
        cpu = raw_smp_processor_id();

        if (blk_tracer) {
                tracing_record_cmdline(current);

                pc = preempt_count();
                event = trace_buffer_lock_reserve(blk_tr, TRACE_BLK,
                                                  sizeof(*t) + pdu_len,
                                                  0, pc);
                if (!event)
                        return;
                t = ring_buffer_event_data(event);
                goto record_it;
        }

        /*
         * A word about the locking here - we disable interrupts to reserve
         * some space in the relay per-cpu buffer, to prevent an irq
         * from coming in and stepping on our toes.
         */
        local_irq_save(flags);

        if (unlikely(tsk->btrace_seq != blktrace_seq))
                trace_note_tsk(bt, tsk);

        t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
        if (t) {
                sequence = per_cpu_ptr(bt->sequence, cpu);

                t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
                t->sequence = ++(*sequence);
                t->time = ktime_to_ns(ktime_get());
record_it:
                /*
                 * These two are not needed in ftrace as they are in the
                 * generic trace_entry, filled by tracing_generic_entry_update,
                 * but for the trace_event->bin() synthesizer benefit we do it
                 * here too.
                 */
                t->cpu = cpu;
                t->pid = pid;

                t->sector = sector;
                t->bytes = bytes;
                t->action = what;
                t->device = bt->dev;
                t->error = error;
                t->pdu_len = pdu_len;

                if (pdu_len)
                        memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);

                if (blk_tracer) {
                        trace_buffer_unlock_commit(blk_tr, event, 0, pc);
                        return;
                }
        }

        local_irq_restore(flags);
}

static struct dentry *blk_tree_root;
static DEFINE_MUTEX(blk_tree_mutex);

static void blk_trace_free(struct blk_trace *bt)
{
        debugfs_remove(bt->msg_file);
        debugfs_remove(bt->dropped_file);
        relay_close(bt->rchan);
        free_percpu(bt->sequence);
        free_percpu(bt->msg_data);
        kfree(bt);
}

static void blk_trace_cleanup(struct blk_trace *bt)
{
        blk_trace_free(bt);
        if (atomic_dec_and_test(&blk_probes_ref))
                blk_unregister_tracepoints();
}

int blk_trace_remove(struct request_queue *q)
{
        struct blk_trace *bt;

        bt = xchg(&q->blk_trace, NULL);
        if (!bt)
                return -EINVAL;

        if (bt->trace_state != Blktrace_running)
                blk_trace_cleanup(bt);

        return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_remove);

static int blk_dropped_open(struct inode *inode, struct file *filp)
{
        filp->private_data = inode->i_private;

        return 0;
}

static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
                                size_t count, loff_t *ppos)
{
        struct blk_trace *bt = filp->private_data;
        char buf[16];

        snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));

        return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}

static const struct file_operations blk_dropped_fops = {
        .owner =        THIS_MODULE,
        .open =         blk_dropped_open,
        .read =         blk_dropped_read,
};

static int blk_msg_open(struct inode *inode, struct file *filp)
{
        filp->private_data = inode->i_private;

        return 0;
}
static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
                                size_t count, loff_t *ppos)
{
        char *msg;
        struct blk_trace *bt;

        if (count >= BLK_TN_MAX_MSG)
                return -EINVAL;

        /* the user buffer is not NUL-terminated; terminate it ourselves */
        msg = kmalloc(count + 1, GFP_KERNEL);
        if (msg == NULL)
                return -ENOMEM;

        if (copy_from_user(msg, buffer, count)) {
                kfree(msg);
                return -EFAULT;
        }

        msg[count] = '\0';
        bt = filp->private_data;
        __trace_note_message(bt, "%s", msg);
        kfree(msg);

        return count;
}

static const struct file_operations blk_msg_fops = {
        .owner =        THIS_MODULE,
        .open =         blk_msg_open,
        .write =        blk_msg_write,
};
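
/*
 * The "msg" debugfs file lets user space inject free-form notes into a
 * running trace, e.g. (assuming debugfs is mounted at /sys/kernel/debug):
 *   echo "hit the bug here" > /sys/kernel/debug/block/<dev>/msg
 */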

/*
 * Keep track of how many times we encountered a full subbuffer, to aid
 * the user space app in telling how many lost events there were.
 */
static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
                                     void *prev_subbuf, size_t prev_padding)
{
        struct blk_trace *bt;

        if (!relay_buf_full(buf))
                return 1;

        bt = buf->chan->private_data;
        atomic_inc(&bt->dropped);
        return 0;
}
static int blk_remove_buf_file_callback(struct dentry *dentry)
{
        struct dentry *parent = dentry->d_parent;
        debugfs_remove(dentry);

        /*
         * This will fail for all but the last file, but that is ok: what we
         * care about is the top level buts->name directory going away, when
         * the last trace file is gone. Then we don't have to rmdir() that
         * manually on trace stop, so it nicely solves the issue with
         * force killing of running traces.
         */

        debugfs_remove(parent);
        return 0;
}

static struct dentry *blk_create_buf_file_callback(const char *filename,
                                                   struct dentry *parent,
                                                   int mode,
                                                   struct rchan_buf *buf,
                                                   int *is_global)
{
        return debugfs_create_file(filename, mode, parent, buf,
                                        &relay_file_operations);
}

static struct rchan_callbacks blk_relay_callbacks = {
        .subbuf_start           = blk_subbuf_start_callback,
        .create_buf_file        = blk_create_buf_file_callback,
        .remove_buf_file        = blk_remove_buf_file_callback,
};

/*
 * Setup everything required to start tracing
 */
int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
                        struct blk_user_trace_setup *buts)
{
        struct blk_trace *old_bt, *bt = NULL;
        struct dentry *dir = NULL;
        int ret, i;

        if (!buts->buf_size || !buts->buf_nr)
                return -EINVAL;

        strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
        buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';

        /*
         * Some device names include slashes in their paths; convert them
         * to underscores so the debugfs directory name works as expected.
         */
        for (i = 0; i < strlen(buts->name); i++)
                if (buts->name[i] == '/')
                        buts->name[i] = '_';

        bt = kzalloc(sizeof(*bt), GFP_KERNEL);
        if (!bt)
                return -ENOMEM;

        ret = -ENOMEM;
        bt->sequence = alloc_percpu(unsigned long);
        if (!bt->sequence)
                goto err;

        bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
        if (!bt->msg_data)
                goto err;

        ret = -ENOENT;

        mutex_lock(&blk_tree_mutex);
        if (!blk_tree_root) {
                blk_tree_root = debugfs_create_dir("block", NULL);
                if (!blk_tree_root) {
                        mutex_unlock(&blk_tree_mutex);
                        goto err;
                }
        }
        mutex_unlock(&blk_tree_mutex);

        dir = debugfs_create_dir(buts->name, blk_tree_root);

        if (!dir)
                goto err;

        bt->dir = dir;
        bt->dev = dev;
        atomic_set(&bt->dropped, 0);

        ret = -EIO;
        bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
                                               &blk_dropped_fops);
        if (!bt->dropped_file)
                goto err;

        bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
        if (!bt->msg_file)
                goto err;

        bt->rchan = relay_open("trace", dir, buts->buf_size,
                                buts->buf_nr, &blk_relay_callbacks, bt);
        if (!bt->rchan)
                goto err;

        bt->act_mask = buts->act_mask;
        if (!bt->act_mask)
                bt->act_mask = (u16) -1;

        bt->start_lba = buts->start_lba;
        bt->end_lba = buts->end_lba;
        if (!bt->end_lba)
                bt->end_lba = -1ULL;

        bt->pid = buts->pid;
        bt->trace_state = Blktrace_setup;

        ret = -EBUSY;
        old_bt = xchg(&q->blk_trace, bt);
        if (old_bt) {
                (void) xchg(&q->blk_trace, old_bt);
                goto err;
        }

        if (atomic_inc_return(&blk_probes_ref) == 1)
                blk_register_tracepoints();

        return 0;
err:
        blk_trace_free(bt);
        return ret;
}

int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
                    char __user *arg)
{
        struct blk_user_trace_setup buts;
        int ret;

        ret = copy_from_user(&buts, arg, sizeof(buts));
        if (ret)
                return -EFAULT;

        ret = do_blk_trace_setup(q, name, dev, &buts);
        if (ret)
                return ret;

        if (copy_to_user(arg, &buts, sizeof(buts)))
                return -EFAULT;

        return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_setup);
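
/*
 * User space drives the setup above via ioctls on the block device,
 * roughly (a sketch of what blktrace(8) does):
 *   struct blk_user_trace_setup buts = { .buf_size = ..., .buf_nr = ... };
 *   ioctl(fd, BLKTRACESETUP, &buts);
 *   ioctl(fd, BLKTRACESTART);
 */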

int blk_trace_startstop(struct request_queue *q, int start)
{
        int ret;
        struct blk_trace *bt = q->blk_trace;

        if (bt == NULL)
                return -EINVAL;

        /*
         * For starting a trace, we can transition from a setup or stopped
         * trace. For stopping a trace, the state must be running
         */
        ret = -EINVAL;
        if (start) {
                if (bt->trace_state == Blktrace_setup ||
                    bt->trace_state == Blktrace_stopped) {
                        blktrace_seq++;
                        smp_mb();
                        bt->trace_state = Blktrace_running;

                        trace_note_time(bt);
                        ret = 0;
                }
        } else {
                if (bt->trace_state == Blktrace_running) {
                        bt->trace_state = Blktrace_stopped;
                        relay_flush(bt->rchan);
                        ret = 0;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_startstop);

/**
 * blk_trace_ioctl: - handle the ioctls associated with tracing
 * @bdev:       the block device
 * @cmd:        the ioctl cmd
 * @arg:        the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
        struct request_queue *q;
        int ret, start = 0;
        char b[BDEVNAME_SIZE];

        q = bdev_get_queue(bdev);
        if (!q)
                return -ENXIO;

        mutex_lock(&bdev->bd_mutex);

        switch (cmd) {
        case BLKTRACESETUP:
                bdevname(bdev, b);
                ret = blk_trace_setup(q, b, bdev->bd_dev, arg);
                break;
        case BLKTRACESTART:
                start = 1;
                /* fall through */
        case BLKTRACESTOP:
                ret = blk_trace_startstop(q, start);
                break;
        case BLKTRACETEARDOWN:
                ret = blk_trace_remove(q);
                break;
        default:
                ret = -ENOTTY;
                break;
        }

        mutex_unlock(&bdev->bd_mutex);
        return ret;
}

/**
 * blk_trace_shutdown: - stop and cleanup trace structures
 * @q:    the request queue associated with the device
 *
 **/
void blk_trace_shutdown(struct request_queue *q)
{
        if (q->blk_trace) {
                blk_trace_startstop(q, 0);
                blk_trace_remove(q);
        }
}

/*
 * blktrace probes
 */

/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @q:          queue the io is for
 * @rq:         the source request
 * @what:       the action
 *
 * Description:
 *     Records an action against a request. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
                                    u32 what)
{
        struct blk_trace *bt = q->blk_trace;
        int rw = rq->cmd_flags & 0x03;

        if (likely(!bt))
                return;

        if (blk_discard_rq(rq))
                rw |= (1 << BIO_RW_DISCARD);

        if (blk_pc_request(rq)) {
                what |= BLK_TC_ACT(BLK_TC_PC);
                __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors,
                                sizeof(rq->cmd), rq->cmd);
        } else  {
                what |= BLK_TC_ACT(BLK_TC_FS);
                __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
                                rw, what, rq->errors, 0, NULL);
        }
}

static void blk_add_trace_rq_abort(struct request_queue *q, struct request *rq)
{
        blk_add_trace_rq(q, rq, BLK_TA_ABORT);
}

static void blk_add_trace_rq_insert(struct request_queue *q, struct request *rq)
{
        blk_add_trace_rq(q, rq, BLK_TA_INSERT);
}

static void blk_add_trace_rq_issue(struct request_queue *q, struct request *rq)
{
        blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
}

static void blk_add_trace_rq_requeue(struct request_queue *q,
                                     struct request *rq)
{
        blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
}

static void blk_add_trace_rq_complete(struct request_queue *q,
                                      struct request *rq)
{
        blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
}

/**
 * blk_add_trace_bio - Add a trace for a bio oriented action
 * @q:          queue the io is for
 * @bio:        the source bio
 * @what:       the action
 *
 * Description:
 *     Records an action against a bio. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
                                     u32 what)
{
        struct blk_trace *bt = q->blk_trace;

        if (likely(!bt))
                return;

        __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
                        !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
}

static void blk_add_trace_bio_bounce(struct request_queue *q, struct bio *bio)
{
        blk_add_trace_bio(q, bio, BLK_TA_BOUNCE);
}

static void blk_add_trace_bio_complete(struct request_queue *q, struct bio *bio)
{
        blk_add_trace_bio(q, bio, BLK_TA_COMPLETE);
}

static void blk_add_trace_bio_backmerge(struct request_queue *q,
                                        struct bio *bio)
{
        blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
}

static void blk_add_trace_bio_frontmerge(struct request_queue *q,
                                         struct bio *bio)
{
        blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
}

static void blk_add_trace_bio_queue(struct request_queue *q, struct bio *bio)
{
        blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
}

static void blk_add_trace_getrq(struct request_queue *q,
                                struct bio *bio, int rw)
{
        if (bio)
                blk_add_trace_bio(q, bio, BLK_TA_GETRQ);
        else {
                struct blk_trace *bt = q->blk_trace;

                if (bt)
                        __blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL);
        }
}

static void blk_add_trace_sleeprq(struct request_queue *q,
                                  struct bio *bio, int rw)
{
        if (bio)
                blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ);
        else {
                struct blk_trace *bt = q->blk_trace;

                if (bt)
                        __blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ,
                                        0, 0, NULL);
        }
}

static void blk_add_trace_plug(struct request_queue *q)
{
        struct blk_trace *bt = q->blk_trace;

        if (bt)
                __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
}

static void blk_add_trace_unplug_io(struct request_queue *q)
{
        struct blk_trace *bt = q->blk_trace;

        if (bt) {
                unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
                __be64 rpdu = cpu_to_be64(pdu);

                __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
                                sizeof(rpdu), &rpdu);
        }
}

static void blk_add_trace_unplug_timer(struct request_queue *q)
{
        struct blk_trace *bt = q->blk_trace;

        if (bt) {
                unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
                __be64 rpdu = cpu_to_be64(pdu);

                __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_TIMER, 0,
                                sizeof(rpdu), &rpdu);
        }
}

static void blk_add_trace_split(struct request_queue *q, struct bio *bio,
                                unsigned int pdu)
{
        struct blk_trace *bt = q->blk_trace;

        if (bt) {
                __be64 rpdu = cpu_to_be64(pdu);

                __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
                                BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE),
                                sizeof(rpdu), &rpdu);
        }
}

/**
 * blk_add_trace_remap - Add a trace for a remap operation
 * @q:          queue the io is for
 * @bio:        the source bio
 * @dev:        target device
 * @from:       source sector
 * @to:         target sector
 *
 * Description:
 *     Device mapper and raid targets sometimes need to split a bio because
 *     it spans a stripe (or similar). Add a trace for that action.
 *
 **/
static void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
                                       dev_t dev, sector_t from, sector_t to)
{
        struct blk_trace *bt = q->blk_trace;
        struct blk_io_trace_remap r;

        if (likely(!bt))
                return;

        r.device = cpu_to_be32(dev);
        r.device_from = cpu_to_be32(bio->bi_bdev->bd_dev);
        r.sector = cpu_to_be64(to);

        __blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP,
                        !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
}

/**
 * blk_add_driver_data - Add binary message with driver-specific data
 * @q:          queue the io is for
 * @rq:         io request
 * @data:       driver-specific data
 * @len:        length of driver-specific data
 *
 * Description:
 *     Some drivers might want to write driver-specific data per request.
 *
 **/
void blk_add_driver_data(struct request_queue *q,
                         struct request *rq,
                         void *data, size_t len)
{
        struct blk_trace *bt = q->blk_trace;

        if (likely(!bt))
                return;

        if (blk_pc_request(rq))
                __blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA,
                                rq->errors, len, data);
        else
                __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
                                0, BLK_TA_DRV_DATA, rq->errors, len, data);
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);
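
/*
 * Example (a sketch): a driver could attach its own status blob to a
 * request's trace stream with something like
 *   blk_add_driver_data(q, rq, &drv_status, sizeof(drv_status));
 * which blkparse then reports as a driver-data event.
 */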

static void blk_register_tracepoints(void)
{
        int ret;

        ret = register_trace_block_rq_abort(blk_add_trace_rq_abort);
        WARN_ON(ret);
        ret = register_trace_block_rq_insert(blk_add_trace_rq_insert);
        WARN_ON(ret);
        ret = register_trace_block_rq_issue(blk_add_trace_rq_issue);
        WARN_ON(ret);
        ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue);
        WARN_ON(ret);
        ret = register_trace_block_rq_complete(blk_add_trace_rq_complete);
        WARN_ON(ret);
        ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce);
        WARN_ON(ret);
        ret = register_trace_block_bio_complete(blk_add_trace_bio_complete);
        WARN_ON(ret);
        ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
        WARN_ON(ret);
        ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
        WARN_ON(ret);
        ret = register_trace_block_bio_queue(blk_add_trace_bio_queue);
        WARN_ON(ret);
        ret = register_trace_block_getrq(blk_add_trace_getrq);
        WARN_ON(ret);
        ret = register_trace_block_sleeprq(blk_add_trace_sleeprq);
        WARN_ON(ret);
        ret = register_trace_block_plug(blk_add_trace_plug);
        WARN_ON(ret);
        ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer);
        WARN_ON(ret);
        ret = register_trace_block_unplug_io(blk_add_trace_unplug_io);
        WARN_ON(ret);
        ret = register_trace_block_split(blk_add_trace_split);
        WARN_ON(ret);
        ret = register_trace_block_remap(blk_add_trace_remap);
        WARN_ON(ret);
}

static void blk_unregister_tracepoints(void)
{
        unregister_trace_block_remap(blk_add_trace_remap);
        unregister_trace_block_split(blk_add_trace_split);
        unregister_trace_block_unplug_io(blk_add_trace_unplug_io);
        unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer);
        unregister_trace_block_plug(blk_add_trace_plug);
        unregister_trace_block_sleeprq(blk_add_trace_sleeprq);
        unregister_trace_block_getrq(blk_add_trace_getrq);
        unregister_trace_block_bio_queue(blk_add_trace_bio_queue);
        unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
        unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
        unregister_trace_block_bio_complete(blk_add_trace_bio_complete);
        unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce);
        unregister_trace_block_rq_complete(blk_add_trace_rq_complete);
        unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue);
        unregister_trace_block_rq_issue(blk_add_trace_rq_issue);
        unregister_trace_block_rq_insert(blk_add_trace_rq_insert);
        unregister_trace_block_rq_abort(blk_add_trace_rq_abort);

        tracepoint_synchronize_unregister();
}

/*
 * struct blk_io_tracer formatting routines
 */

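/*
 * Build the classic blktrace "RWBS" string: one of D/W/R/N for the data
 * direction, optionally followed by A (readahead), B (barrier), S (sync)
 * and M (meta) modifier flags.
 */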
static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
{
        int i = 0;
        int tc = t->action >> BLK_TC_SHIFT;

        if (t->action == BLK_TN_MESSAGE) {
                rwbs[i++] = 'N';
                goto out;
        }

        if (tc & BLK_TC_DISCARD)
                rwbs[i++] = 'D';
        else if (tc & BLK_TC_WRITE)
                rwbs[i++] = 'W';
        else if (t->bytes)
                rwbs[i++] = 'R';
        else
                rwbs[i++] = 'N';

        if (tc & BLK_TC_AHEAD)
                rwbs[i++] = 'A';
        if (tc & BLK_TC_BARRIER)
                rwbs[i++] = 'B';
        if (tc & BLK_TC_SYNC)
                rwbs[i++] = 'S';
        if (tc & BLK_TC_META)
                rwbs[i++] = 'M';
out:
        rwbs[i] = '\0';
}

static inline
const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
{
        return (const struct blk_io_trace *)ent;
}

static inline const void *pdu_start(const struct trace_entry *ent)
{
        return te_blk_io_trace(ent) + 1;
}

static inline u32 t_sec(const struct trace_entry *ent)
{
        return te_blk_io_trace(ent)->bytes >> 9;
}

static inline unsigned long long t_sector(const struct trace_entry *ent)
{
        return te_blk_io_trace(ent)->sector;
}

static inline __u16 t_error(const struct trace_entry *ent)
{
        return te_blk_io_trace(ent)->error;
}

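/*
 * PDU payloads are stored big-endian by the __blk_add_trace() callers
 * (see the cpu_to_be64() calls above), so convert them back on the way
 * out.
 */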
static __u64 get_pdu_int(const struct trace_entry *ent)
{
        const __u64 *val = pdu_start(ent);
        return be64_to_cpu(*val);
}

static void get_pdu_remap(const struct trace_entry *ent,
                          struct blk_io_trace_remap *r)
{
        const struct blk_io_trace_remap *__r = pdu_start(ent);
        __u64 sector = __r->sector;

        r->device = be32_to_cpu(__r->device);
        r->device_from = be32_to_cpu(__r->device_from);
        r->sector = be64_to_cpu(sector);
}

typedef int (blk_log_action_t) (struct trace_iterator *iter, const char *act);

static int blk_log_action_classic(struct trace_iterator *iter, const char *act)
{
        char rwbs[6];
        unsigned long long ts  = iter->ts;
        unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
        unsigned secs          = (unsigned long)ts;
        const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

        fill_rwbs(rwbs, t);

        return trace_seq_printf(&iter->seq,
                                "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
                                MAJOR(t->device), MINOR(t->device), iter->cpu,
                                secs, nsec_rem, iter->ent->pid, act, rwbs);
}

static int blk_log_action(struct trace_iterator *iter, const char *act)
{
        char rwbs[6];
        const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

        fill_rwbs(rwbs, t);
        return trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
                                MAJOR(t->device), MINOR(t->device), act, rwbs);
}

static int blk_log_generic(struct trace_seq *s, const struct trace_entry *ent)
{
        char cmd[TASK_COMM_LEN];

        trace_find_cmdline(ent->pid, cmd);

        if (t_sec(ent))
                return trace_seq_printf(s, "%llu + %u [%s]\n",
                                        t_sector(ent), t_sec(ent), cmd);
        return trace_seq_printf(s, "[%s]\n", cmd);
}

static int blk_log_with_error(struct trace_seq *s,
                              const struct trace_entry *ent)
{
        if (t_sec(ent))
                return trace_seq_printf(s, "%llu + %u [%d]\n", t_sector(ent),
                                        t_sec(ent), t_error(ent));
        return trace_seq_printf(s, "%llu [%d]\n", t_sector(ent), t_error(ent));
}

static int blk_log_remap(struct trace_seq *s, const struct trace_entry *ent)
{
        struct blk_io_trace_remap r = { .device = 0, };

        get_pdu_remap(ent, &r);
        return trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
                               t_sector(ent),
                               t_sec(ent), MAJOR(r.device), MINOR(r.device),
                               (unsigned long long)r.sector);
}

static int blk_log_plug(struct trace_seq *s, const struct trace_entry *ent)
{
        char cmd[TASK_COMM_LEN];

        trace_find_cmdline(ent->pid, cmd);

        return trace_seq_printf(s, "[%s]\n", cmd);
}

static int blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent)
{
        char cmd[TASK_COMM_LEN];

        trace_find_cmdline(ent->pid, cmd);

        return trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent));
}

static int blk_log_split(struct trace_seq *s, const struct trace_entry *ent)
{
        char cmd[TASK_COMM_LEN];

        trace_find_cmdline(ent->pid, cmd);

        return trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
                                get_pdu_int(ent), cmd);
}

static int blk_log_msg(struct trace_seq *s, const struct trace_entry *ent)
{
        int ret;
        const struct blk_io_trace *t = te_blk_io_trace(ent);

        ret = trace_seq_putmem(s, t + 1, t->pdu_len);
        if (ret)
                return trace_seq_putc(s, '\n');
        return ret;
}

/*
 * struct tracer operations
 */

static void blk_tracer_print_header(struct seq_file *m)
{
        if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
                return;
        seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
                    "#  |     |     |           |   |   |\n");
}

static void blk_tracer_start(struct trace_array *tr)
{
        blk_tracer_enabled = true;
        trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
}

static int blk_tracer_init(struct trace_array *tr)
{
        blk_tr = tr;
        blk_tracer_start(tr);
        return 0;
}

static void blk_tracer_stop(struct trace_array *tr)
{
        blk_tracer_enabled = false;
        trace_flags |= TRACE_ITER_CONTEXT_INFO;
}

static void blk_tracer_reset(struct trace_array *tr)
{
        blk_tracer_stop(tr);
}

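/*
 * Map each trace action to its terse and verbose names plus the routine
 * that formats the rest of the line; act[0] is the one-letter form,
 * act[1] the verbose one selected by the "verbose" trace option.
 */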
static const struct {
        const char *act[2];
        int        (*print)(struct trace_seq *s, const struct trace_entry *ent);
} what2act[] = {
        [__BLK_TA_QUEUE]        = {{  "Q", "queue" },      blk_log_generic },
        [__BLK_TA_BACKMERGE]    = {{  "M", "backmerge" },  blk_log_generic },
        [__BLK_TA_FRONTMERGE]   = {{  "F", "frontmerge" }, blk_log_generic },
        [__BLK_TA_GETRQ]        = {{  "G", "getrq" },      blk_log_generic },
        [__BLK_TA_SLEEPRQ]      = {{  "S", "sleeprq" },    blk_log_generic },
        [__BLK_TA_REQUEUE]      = {{  "R", "requeue" },    blk_log_with_error },
        [__BLK_TA_ISSUE]        = {{  "D", "issue" },      blk_log_generic },
        [__BLK_TA_COMPLETE]     = {{  "C", "complete" },   blk_log_with_error },
        [__BLK_TA_PLUG]         = {{  "P", "plug" },       blk_log_plug },
        [__BLK_TA_UNPLUG_IO]    = {{  "U", "unplug_io" },  blk_log_unplug },
        [__BLK_TA_UNPLUG_TIMER] = {{ "UT", "unplug_timer" }, blk_log_unplug },
        [__BLK_TA_INSERT]       = {{  "I", "insert" },     blk_log_generic },
        [__BLK_TA_SPLIT]        = {{  "X", "split" },      blk_log_split },
        [__BLK_TA_BOUNCE]       = {{  "B", "bounce" },     blk_log_generic },
        [__BLK_TA_REMAP]        = {{  "A", "remap" },      blk_log_remap },
};

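/*
 * Format one event for the "blk" tracer; @classic selects the original
 * blkparse-style layout over the default terse one.
 */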
static enum print_line_t print_one_line(struct trace_iterator *iter,
                                        bool classic)
{
        struct trace_seq *s = &iter->seq;
        const struct blk_io_trace *t;
        u16 what;
        int ret;
        bool long_act;
        blk_log_action_t *log_action;

        t          = te_blk_io_trace(iter->ent);
        what       = t->action & ((1 << BLK_TC_SHIFT) - 1);
        long_act   = !!(trace_flags & TRACE_ITER_VERBOSE);
        log_action = classic ? &blk_log_action_classic : &blk_log_action;

        if (t->action == BLK_TN_MESSAGE) {
                ret = log_action(iter, long_act ? "message" : "m");
                if (ret)
                        ret = blk_log_msg(s, iter->ent);
                goto out;
        }

        if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
                ret = trace_seq_printf(s, "Bad pc action %x\n", what);
        else {
                ret = log_action(iter, what2act[what].act[long_act]);
                if (ret)
                        ret = what2act[what].print(s, iter->ent);
        }
out:
        return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
                                               int flags)
{
        if (!trace_print_context(iter))
                return TRACE_TYPE_PARTIAL_LINE;

        return print_one_line(iter, false);
}

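/*
 * Rebuild the on-disk binary blk_io_trace record from an ftrace entry so
 * that existing user space tools can consume the output unchanged; only
 * the leading fields differ, so splice a fresh header onto the original
 * payload starting at the sector field.
 */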
static int blk_trace_synthesize_old_trace(struct trace_iterator *iter)
{
        struct trace_seq *s = &iter->seq;
        struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
        const int offset = offsetof(struct blk_io_trace, sector);
        struct blk_io_trace old = {
                .magic    = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
                .time     = iter->ts,
        };

        if (!trace_seq_putmem(s, &old, offset))
                return 0;
        return trace_seq_putmem(s, &t->sector,
                                sizeof(old) - offset + t->pdu_len);
}

static enum print_line_t
blk_trace_event_print_binary(struct trace_iterator *iter, int flags)
{
        return blk_trace_synthesize_old_trace(iter) ?
                        TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
{
        if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
                return TRACE_TYPE_UNHANDLED;

        return print_one_line(iter, true);
}

static struct tracer blk_tracer __read_mostly = {
        .name           = "blk",
        .init           = blk_tracer_init,
        .reset          = blk_tracer_reset,
        .start          = blk_tracer_start,
        .stop           = blk_tracer_stop,
        .print_header   = blk_tracer_print_header,
        .print_line     = blk_tracer_print_line,
        .flags          = &blk_tracer_flags,
};

static struct trace_event trace_blk_event = {
        .type           = TRACE_BLK,
        .trace          = blk_trace_event_print,
        .binary         = blk_trace_event_print_binary,
};

static int __init init_blk_tracer(void)
{
        if (!register_ftrace_event(&trace_blk_event)) {
                pr_warning("Warning: could not register block events\n");
                return 1;
        }

        if (register_tracer(&blk_tracer) != 0) {
                pr_warning("Warning: could not register the block tracer\n");
                unregister_ftrace_event(&trace_blk_event);
                return 1;
        }

        return 0;
}

device_initcall(init_blk_tracer);

static int blk_trace_remove_queue(struct request_queue *q)
{
        struct blk_trace *bt;

        bt = xchg(&q->blk_trace, NULL);
        if (bt == NULL)
                return -EINVAL;

        if (atomic_dec_and_test(&blk_probes_ref))
                blk_unregister_tracepoints();

        blk_trace_free(bt);
        return 0;
}

/*
 * Setup everything required to start tracing
 */
static int blk_trace_setup_queue(struct request_queue *q, dev_t dev)
{
        struct blk_trace *old_bt, *bt = NULL;
        int ret = -ENOMEM;

        bt = kzalloc(sizeof(*bt), GFP_KERNEL);
        if (!bt)
                return -ENOMEM;

        bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
        if (!bt->msg_data)
                goto free_bt;

        bt->dev = dev;
        bt->act_mask = (u16)-1;
        bt->end_lba = -1ULL;

        old_bt = xchg(&q->blk_trace, bt);
        if (old_bt != NULL) {
                (void)xchg(&q->blk_trace, old_bt);
                ret = -EBUSY;
                goto free_bt;
        }

        if (atomic_inc_return(&blk_probes_ref) == 1)
                blk_register_tracepoints();
        return 0;

free_bt:
        blk_trace_free(bt);
        return ret;
}

/*
 * sysfs interface to enable and configure tracing
 */
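
/*
 * These attributes typically appear under the per-device "trace" group,
 * i.e. /sys/block/<disk>/trace/ and /sys/block/<disk>/<part>/trace/
 * (enable, act_mask, pid, start_lba, end_lba).
 */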

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf);
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
                                          struct device_attribute *attr,
                                          const char *buf, size_t count);
#define BLK_TRACE_DEVICE_ATTR(_name) \
        DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
                    sysfs_blk_trace_attr_show, \
                    sysfs_blk_trace_attr_store)

static BLK_TRACE_DEVICE_ATTR(enable);
static BLK_TRACE_DEVICE_ATTR(act_mask);
static BLK_TRACE_DEVICE_ATTR(pid);
static BLK_TRACE_DEVICE_ATTR(start_lba);
static BLK_TRACE_DEVICE_ATTR(end_lba);

static struct attribute *blk_trace_attrs[] = {
        &dev_attr_enable.attr,
        &dev_attr_act_mask.attr,
        &dev_attr_pid.attr,
        &dev_attr_start_lba.attr,
        &dev_attr_end_lba.attr,
        NULL
};

struct attribute_group blk_trace_attr_group = {
        .name  = "trace",
        .attrs = blk_trace_attrs,
};

static const struct {
        int mask;
        const char *str;
} mask_maps[] = {
        { BLK_TC_READ,          "read"          },
        { BLK_TC_WRITE,         "write"         },
        { BLK_TC_BARRIER,       "barrier"       },
        { BLK_TC_SYNC,          "sync"          },
        { BLK_TC_QUEUE,         "queue"         },
        { BLK_TC_REQUEUE,       "requeue"       },
        { BLK_TC_ISSUE,         "issue"         },
        { BLK_TC_COMPLETE,      "complete"      },
        { BLK_TC_FS,            "fs"            },
        { BLK_TC_PC,            "pc"            },
        { BLK_TC_AHEAD,         "ahead"         },
        { BLK_TC_META,          "meta"          },
        { BLK_TC_DISCARD,       "discard"       },
        { BLK_TC_DRV_DATA,      "drv_data"      },
};

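/*
 * Parse a comma-separated list of category names into an act_mask, so
 * e.g. `echo read,write,sync > act_mask` works alongside the plain hex
 * form accepted by sysfs_blk_trace_attr_store().
 */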
static int blk_trace_str2mask(const char *str)
{
        int i;
        int mask = 0;
        char *s, *token;

        s = kstrdup(str, GFP_KERNEL);
        if (s == NULL)
                return -ENOMEM;
        s = strstrip(s);

        while (1) {
                token = strsep(&s, ",");
                if (token == NULL)
                        break;

                if (*token == '\0')
                        continue;

                for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
                        if (strcasecmp(token, mask_maps[i].str) == 0) {
                                mask |= mask_maps[i].mask;
                                break;
                        }
                }
                if (i == ARRAY_SIZE(mask_maps)) {
                        mask = -EINVAL;
                        break;
                }
        }
        kfree(s);

        return mask;
}

static ssize_t blk_trace_mask2str(char *buf, int mask)
{
        int i;
        char *p = buf;

        for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
                if (mask & mask_maps[i].mask) {
                        p += sprintf(p, "%s%s",
                                    (p == buf) ? "" : ",", mask_maps[i].str);
                }
        }
        *p++ = '\n';

        return p - buf;
}

static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
{
        if (bdev->bd_disk == NULL)
                return NULL;

        return bdev_get_queue(bdev);
}

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        struct hd_struct *p = dev_to_part(dev);
        struct request_queue *q;
        struct block_device *bdev;
        ssize_t ret = -ENXIO;

        lock_kernel();
        bdev = bdget(part_devt(p));
        if (bdev == NULL)
                goto out_unlock_kernel;

        q = blk_trace_get_queue(bdev);
        if (q == NULL)
                goto out_bdput;

        mutex_lock(&bdev->bd_mutex);

        if (attr == &dev_attr_enable) {
                ret = sprintf(buf, "%u\n", !!q->blk_trace);
                goto out_unlock_bdev;
        }

        if (q->blk_trace == NULL)
                ret = sprintf(buf, "disabled\n");
        else if (attr == &dev_attr_act_mask)
                ret = blk_trace_mask2str(buf, q->blk_trace->act_mask);
        else if (attr == &dev_attr_pid)
                ret = sprintf(buf, "%u\n", q->blk_trace->pid);
        else if (attr == &dev_attr_start_lba)
                ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
        else if (attr == &dev_attr_end_lba)
                ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);

out_unlock_bdev:
        mutex_unlock(&bdev->bd_mutex);
out_bdput:
        bdput(bdev);
out_unlock_kernel:
        unlock_kernel();
        return ret;
}

static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
                                          struct device_attribute *attr,
                                          const char *buf, size_t count)
{
        struct block_device *bdev;
        struct request_queue *q;
        struct hd_struct *p;
        u64 value;
        ssize_t ret = -EINVAL;

        if (count == 0)
                goto out;

        if (attr == &dev_attr_act_mask) {
                if (sscanf(buf, "%llx", &value) != 1) {
                        /* Assume it is a list of trace category names */
                        ret = blk_trace_str2mask(buf);
                        if (ret < 0)
                                goto out;
                        value = ret;
                }
        } else if (sscanf(buf, "%llu", &value) != 1)
                goto out;

        ret = -ENXIO;

        lock_kernel();
        p = dev_to_part(dev);
        bdev = bdget(part_devt(p));
        if (bdev == NULL)
                goto out_unlock_kernel;

        q = blk_trace_get_queue(bdev);
        if (q == NULL)
                goto out_bdput;

        mutex_lock(&bdev->bd_mutex);

        if (attr == &dev_attr_enable) {
                if (value)
                        ret = blk_trace_setup_queue(q, bdev->bd_dev);
                else
                        ret = blk_trace_remove_queue(q);
                goto out_unlock_bdev;
        }

        ret = 0;
        if (q->blk_trace == NULL)
                ret = blk_trace_setup_queue(q, bdev->bd_dev);

        if (ret == 0) {
                if (attr == &dev_attr_act_mask)
                        q->blk_trace->act_mask = value;
                else if (attr == &dev_attr_pid)
                        q->blk_trace->pid = value;
                else if (attr == &dev_attr_start_lba)
                        q->blk_trace->start_lba = value;
                else if (attr == &dev_attr_end_lba)
                        q->blk_trace->end_lba = value;
        }

out_unlock_bdev:
        mutex_unlock(&bdev->bd_mutex);
out_bdput:
        bdput(bdev);
out_unlock_kernel:
        unlock_kernel();
out:
        return ret ? ret : count;
}