block: add missing block_bio_complete() tracepoint
linux-3.10.git: kernel/trace/blktrace.c
/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/uaccess.h>

#include <trace/events/block.h>

#include "trace_output.h"

#ifdef CONFIG_BLK_DEV_IO_TRACE

static unsigned int blktrace_seq __read_mostly = 1;

static struct trace_array *blk_tr;
static bool blk_tracer_enabled __read_mostly;

/* Select an alternative, minimalistic output format instead of the original one */
#define TRACE_BLK_OPT_CLASSIC   0x1

static struct tracer_opt blk_tracer_opts[] = {
        /* The minimalistic (classic) output is disabled by default */
        { TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
        { }
};

static struct tracer_flags blk_tracer_flags = {
        .val  = 0,
        .opts = blk_tracer_opts,
};

/* Global reference count of probes */
static atomic_t blk_probes_ref = ATOMIC_INIT(0);

static void blk_register_tracepoints(void);
static void blk_unregister_tracepoints(void);

/*
 * Send out a notify message.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
                       const void *data, size_t len)
{
        struct blk_io_trace *t;
        struct ring_buffer_event *event = NULL;
        struct ring_buffer *buffer = NULL;
        int pc = 0;
        int cpu = smp_processor_id();
        bool blk_tracer = blk_tracer_enabled;

        if (blk_tracer) {
                buffer = blk_tr->buffer;
                pc = preempt_count();
                event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
                                                  sizeof(*t) + len,
                                                  0, pc);
                if (!event)
                        return;
                t = ring_buffer_event_data(event);
                goto record_it;
        }

        if (!bt->rchan)
                return;

        t = relay_reserve(bt->rchan, sizeof(*t) + len);
        if (t) {
                t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
                t->time = ktime_to_ns(ktime_get());
record_it:
                t->device = bt->dev;
                t->action = action;
                t->pid = pid;
                t->cpu = cpu;
                t->pdu_len = len;
                memcpy((void *) t + sizeof(*t), data, len);

                if (blk_tracer)
                        trace_buffer_unlock_commit(buffer, event, 0, pc);
        }
}
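
/*
 * Illustrative sketch (not part of this file): a userspace reader of the
 * relay channel sees records laid out exactly as written above, i.e. a
 * struct blk_io_trace header immediately followed by pdu_len payload
 * bytes, and can sanity-check each record via the embedded magic:
 *
 *      struct blk_io_trace t;
 *
 *      read(fd, &t, sizeof(t));
 *      if ((t.magic & 0xffffff00) != BLK_IO_TRACE_MAGIC)
 *              return -EINVAL;         (stream out of sync)
 *
 * t.pdu_len payload bytes then follow the header.
 */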

/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started
 */
static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
{
        tsk->btrace_seq = blktrace_seq;
        trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
}

static void trace_note_time(struct blk_trace *bt)
{
        struct timespec now;
        unsigned long flags;
        u32 words[2];

        getnstimeofday(&now);
        words[0] = now.tv_sec;
        words[1] = now.tv_nsec;

        local_irq_save(flags);
        trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words));
        local_irq_restore(flags);
}

void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
{
        int n;
        va_list args;
        unsigned long flags;
        char *buf;

        if (unlikely(bt->trace_state != Blktrace_running &&
                     !blk_tracer_enabled))
                return;

        /*
         * If the BLK_TC_NOTIFY action mask isn't set, don't send any note
         * message to the trace.
         */
        if (!(bt->act_mask & BLK_TC_NOTIFY))
                return;

        local_irq_save(flags);
        buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
        va_start(args, fmt);
        n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
        va_end(args);

        trace_note(bt, 0, BLK_TN_MESSAGE, buf, n);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__trace_note_message);
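
/*
 * Usage sketch (assumed caller, not part of this file): drivers normally
 * reach this through the blk_add_trace_msg() wrapper from blktrace_api.h,
 * and the result shows up as an 'm' note in blkparse output:
 *
 *      blk_add_trace_msg(q, "cache flush took %u usecs", usecs);
 */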

static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
                         pid_t pid)
{
        if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
                return 1;
        if (sector && (sector < bt->start_lba || sector > bt->end_lba))
                return 1;
        if (bt->pid && pid != bt->pid)
                return 1;

        return 0;
}
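
/*
 * Worked example (illustrative): with BLK_TC_SHIFT == 16, an act_mask of
 * BLK_TC_READ (0x01) becomes 0x00010000 once shifted.  A queued read
 * carries what == BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_READ), whose high bits
 * include 0x00010000, so the AND above is non-zero and the event passes;
 * a pure write event would be filtered out.
 */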

/*
 * Data direction bit lookup
 */
static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
                                 BLK_TC_ACT(BLK_TC_WRITE) };

#define BLK_TC_RAHEAD           BLK_TC_AHEAD

/* The ilog2() calls fall out because they're constant */
#define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \
          (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))
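
/*
 * Worked example (illustrative): MASK_TC_BIT(rw, SYNC) isolates the
 * REQ_SYNC bit (bit __REQ_SYNC of the request flags) and shifts it by
 * ilog2(BLK_TC_SYNC) + BLK_TC_SHIFT - __REQ_SYNC, relocating the flag
 * from its cmd_flags position directly into the BLK_TC_SYNC position of
 * the action's classification bits, with no branches at runtime.
 */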

/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
                     int rw, u32 what, int error, int pdu_len, void *pdu_data)
{
        struct task_struct *tsk = current;
        struct ring_buffer_event *event = NULL;
        struct ring_buffer *buffer = NULL;
        struct blk_io_trace *t;
        unsigned long flags = 0;
        unsigned long *sequence;
        pid_t pid;
        int cpu, pc = 0;
        bool blk_tracer = blk_tracer_enabled;

        if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
                return;

        what |= ddir_act[rw & WRITE];
        what |= MASK_TC_BIT(rw, SYNC);
        what |= MASK_TC_BIT(rw, RAHEAD);
        what |= MASK_TC_BIT(rw, META);
        what |= MASK_TC_BIT(rw, DISCARD);
        what |= MASK_TC_BIT(rw, FLUSH);
        what |= MASK_TC_BIT(rw, FUA);

        pid = tsk->pid;
        if (act_log_check(bt, what, sector, pid))
                return;
        cpu = raw_smp_processor_id();

        if (blk_tracer) {
                tracing_record_cmdline(current);

                buffer = blk_tr->buffer;
                pc = preempt_count();
                event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
                                                  sizeof(*t) + pdu_len,
                                                  0, pc);
                if (!event)
                        return;
                t = ring_buffer_event_data(event);
                goto record_it;
        }

        /*
         * A word about the locking here - we disable interrupts to reserve
         * some space in the relay per-cpu buffer, to prevent an irq
         * from coming in and stepping on our toes.
         */
        local_irq_save(flags);

        if (unlikely(tsk->btrace_seq != blktrace_seq))
                trace_note_tsk(bt, tsk);

        t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
        if (t) {
                sequence = per_cpu_ptr(bt->sequence, cpu);

                t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
                t->sequence = ++(*sequence);
                t->time = ktime_to_ns(ktime_get());
record_it:
                /*
                 * These two are not needed in ftrace as they are in the
                 * generic trace_entry, filled by tracing_generic_entry_update,
                 * but for the trace_event->bin() synthesizer benefit we do it
                 * here too.
                 */
                t->cpu = cpu;
                t->pid = pid;

                t->sector = sector;
                t->bytes = bytes;
                t->action = what;
                t->device = bt->dev;
                t->error = error;
                t->pdu_len = pdu_len;

                if (pdu_len)
                        memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);

                if (blk_tracer) {
                        trace_buffer_unlock_commit(buffer, event, 0, pc);
                        return;
                }
        }

        local_irq_restore(flags);
}

static struct dentry *blk_tree_root;
static DEFINE_MUTEX(blk_tree_mutex);

static void blk_trace_free(struct blk_trace *bt)
{
        debugfs_remove(bt->msg_file);
        debugfs_remove(bt->dropped_file);
        relay_close(bt->rchan);
        debugfs_remove(bt->dir);
        free_percpu(bt->sequence);
        free_percpu(bt->msg_data);
        kfree(bt);
}

static void blk_trace_cleanup(struct blk_trace *bt)
{
        blk_trace_free(bt);
        if (atomic_dec_and_test(&blk_probes_ref))
                blk_unregister_tracepoints();
}

int blk_trace_remove(struct request_queue *q)
{
        struct blk_trace *bt;

        bt = xchg(&q->blk_trace, NULL);
        if (!bt)
                return -EINVAL;

        if (bt->trace_state != Blktrace_running)
                blk_trace_cleanup(bt);

        return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_remove);

static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
                                size_t count, loff_t *ppos)
{
        struct blk_trace *bt = filp->private_data;
        char buf[16];

        snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));

        return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}

static const struct file_operations blk_dropped_fops = {
        .owner =        THIS_MODULE,
        .open =         simple_open,
        .read =         blk_dropped_read,
        .llseek =       default_llseek,
};

static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
                                size_t count, loff_t *ppos)
{
        char *msg;
        struct blk_trace *bt;

        if (count >= BLK_TN_MAX_MSG)
                return -EINVAL;

        msg = kmalloc(count + 1, GFP_KERNEL);
        if (msg == NULL)
                return -ENOMEM;

        if (copy_from_user(msg, buffer, count)) {
                kfree(msg);
                return -EFAULT;
        }

        msg[count] = '\0';
        bt = filp->private_data;
        __trace_note_message(bt, "%s", msg);
        kfree(msg);

        return count;
}

static const struct file_operations blk_msg_fops = {
        .owner =        THIS_MODULE,
        .open =         simple_open,
        .write =        blk_msg_write,
        .llseek =       noop_llseek,
};
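
/*
 * Usage sketch (assumed debugfs mount point and device name): userspace
 * can inject a note into a running trace through this file, e.g.
 *
 *      echo "resync start" > /sys/kernel/debug/block/sda/msg
 *
 * which arrives at __trace_note_message() above as a BLK_TN_MESSAGE note.
 */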

/*
 * Keep track of how many times we encountered a full subbuffer, to aid
 * the user space app in telling how many lost events there were.
 */
static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
                                     void *prev_subbuf, size_t prev_padding)
{
        struct blk_trace *bt;

        if (!relay_buf_full(buf))
                return 1;

        bt = buf->chan->private_data;
        atomic_inc(&bt->dropped);
        return 0;
}

static int blk_remove_buf_file_callback(struct dentry *dentry)
{
        debugfs_remove(dentry);

        return 0;
}

static struct dentry *blk_create_buf_file_callback(const char *filename,
                                                   struct dentry *parent,
                                                   umode_t mode,
                                                   struct rchan_buf *buf,
                                                   int *is_global)
{
        return debugfs_create_file(filename, mode, parent, buf,
                                        &relay_file_operations);
}

static struct rchan_callbacks blk_relay_callbacks = {
        .subbuf_start           = blk_subbuf_start_callback,
        .create_buf_file        = blk_create_buf_file_callback,
        .remove_buf_file        = blk_remove_buf_file_callback,
};

static void blk_trace_setup_lba(struct blk_trace *bt,
                                struct block_device *bdev)
{
        struct hd_struct *part = NULL;

        if (bdev)
                part = bdev->bd_part;

        if (part) {
                bt->start_lba = part->start_sect;
                bt->end_lba = part->start_sect + part->nr_sects;
        } else {
                bt->start_lba = 0;
                bt->end_lba = -1ULL;
        }
}

/*
 * Setup everything required to start tracing
 */
int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
                       struct block_device *bdev,
                       struct blk_user_trace_setup *buts)
{
        struct blk_trace *old_bt, *bt = NULL;
        struct dentry *dir = NULL;
        int ret, i;

        if (!buts->buf_size || !buts->buf_nr)
                return -EINVAL;

        strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
        buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';

        /*
         * some device names contain path separators - convert the
         * slashes to underscores so the debugfs directory name is valid
         */
        for (i = 0; i < strlen(buts->name); i++)
                if (buts->name[i] == '/')
                        buts->name[i] = '_';

        bt = kzalloc(sizeof(*bt), GFP_KERNEL);
        if (!bt)
                return -ENOMEM;

        ret = -ENOMEM;
        bt->sequence = alloc_percpu(unsigned long);
        if (!bt->sequence)
                goto err;

        bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
        if (!bt->msg_data)
                goto err;

        ret = -ENOENT;

        mutex_lock(&blk_tree_mutex);
        if (!blk_tree_root) {
                blk_tree_root = debugfs_create_dir("block", NULL);
                if (!blk_tree_root) {
                        mutex_unlock(&blk_tree_mutex);
                        goto err;
                }
        }
        mutex_unlock(&blk_tree_mutex);

        dir = debugfs_create_dir(buts->name, blk_tree_root);

        if (!dir)
                goto err;

        bt->dir = dir;
        bt->dev = dev;
        atomic_set(&bt->dropped, 0);

        ret = -EIO;
        bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
                                               &blk_dropped_fops);
        if (!bt->dropped_file)
                goto err;

        bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
        if (!bt->msg_file)
                goto err;

        bt->rchan = relay_open("trace", dir, buts->buf_size,
                                buts->buf_nr, &blk_relay_callbacks, bt);
        if (!bt->rchan)
                goto err;

        bt->act_mask = buts->act_mask;
        if (!bt->act_mask)
                bt->act_mask = (u16) -1;

        blk_trace_setup_lba(bt, bdev);

        /* overwrite with user settings */
        if (buts->start_lba)
                bt->start_lba = buts->start_lba;
        if (buts->end_lba)
                bt->end_lba = buts->end_lba;

        bt->pid = buts->pid;
        bt->trace_state = Blktrace_setup;

        ret = -EBUSY;
        old_bt = xchg(&q->blk_trace, bt);
        if (old_bt) {
                (void) xchg(&q->blk_trace, old_bt);
                goto err;
        }

        if (atomic_inc_return(&blk_probes_ref) == 1)
                blk_register_tracepoints();

        return 0;
err:
        blk_trace_free(bt);
        return ret;
}

int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
                    struct block_device *bdev,
                    char __user *arg)
{
        struct blk_user_trace_setup buts;
        int ret;

        ret = copy_from_user(&buts, arg, sizeof(buts));
        if (ret)
                return -EFAULT;

        ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
        if (ret)
                return ret;

        if (copy_to_user(arg, &buts, sizeof(buts))) {
                blk_trace_remove(q);
                return -EFAULT;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_setup);
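
/*
 * Userspace sketch (illustrative, not part of this file): this is roughly
 * how a tool like blktrace(8) drives the ioctl path handled further below:
 *
 *      struct blk_user_trace_setup buts = {
 *              .buf_size = 512 * 1024,         (bytes per relay subbuffer)
 *              .buf_nr   = 4,                  (subbuffers per cpu)
 *              .act_mask = 0,                  (0 means "trace everything")
 *      };
 *      int fd = open("/dev/sda", O_RDONLY | O_NONBLOCK);
 *
 *      ioctl(fd, BLKTRACESETUP, &buts);        (fills in buts.name)
 *      ioctl(fd, BLKTRACESTART);
 *      ... consume /sys/kernel/debug/block/<buts.name>/trace<cpu> ...
 *      ioctl(fd, BLKTRACESTOP);
 *      ioctl(fd, BLKTRACETEARDOWN);
 */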

#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
static int compat_blk_trace_setup(struct request_queue *q, char *name,
                                  dev_t dev, struct block_device *bdev,
                                  char __user *arg)
{
        struct blk_user_trace_setup buts;
        struct compat_blk_user_trace_setup cbuts;
        int ret;

        if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
                return -EFAULT;

        buts = (struct blk_user_trace_setup) {
                .act_mask = cbuts.act_mask,
                .buf_size = cbuts.buf_size,
                .buf_nr = cbuts.buf_nr,
                .start_lba = cbuts.start_lba,
                .end_lba = cbuts.end_lba,
                .pid = cbuts.pid,
        };
        memcpy(&buts.name, &cbuts.name, 32);

        ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
        if (ret)
                return ret;

        if (copy_to_user(arg, &buts.name, 32)) {
                blk_trace_remove(q);
                return -EFAULT;
        }

        return 0;
}
#endif

int blk_trace_startstop(struct request_queue *q, int start)
{
        int ret;
        struct blk_trace *bt = q->blk_trace;

        if (bt == NULL)
                return -EINVAL;

        /*
         * For starting a trace, we can transition from a setup or stopped
         * trace. For stopping a trace, the state must be running
         */
        ret = -EINVAL;
        if (start) {
                if (bt->trace_state == Blktrace_setup ||
                    bt->trace_state == Blktrace_stopped) {
                        blktrace_seq++;
                        smp_mb();
                        bt->trace_state = Blktrace_running;

                        trace_note_time(bt);
                        ret = 0;
                }
        } else {
                if (bt->trace_state == Blktrace_running) {
                        bt->trace_state = Blktrace_stopped;
                        relay_flush(bt->rchan);
                        ret = 0;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_startstop);

/**
 * blk_trace_ioctl: - handle the ioctls associated with tracing
 * @bdev:       the block device
 * @cmd:        the ioctl cmd
 * @arg:        the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
        struct request_queue *q;
        int ret, start = 0;
        char b[BDEVNAME_SIZE];

        q = bdev_get_queue(bdev);
        if (!q)
                return -ENXIO;

        mutex_lock(&bdev->bd_mutex);

        switch (cmd) {
        case BLKTRACESETUP:
                bdevname(bdev, b);
                ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
                break;
#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
        case BLKTRACESETUP32:
                bdevname(bdev, b);
                ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
                break;
#endif
        case BLKTRACESTART:
                start = 1;
                /* fall through */
        case BLKTRACESTOP:
                ret = blk_trace_startstop(q, start);
                break;
        case BLKTRACETEARDOWN:
                ret = blk_trace_remove(q);
                break;
        default:
                ret = -ENOTTY;
                break;
        }

        mutex_unlock(&bdev->bd_mutex);
        return ret;
}

/**
 * blk_trace_shutdown: - stop and cleanup trace structures
 * @q:    the request queue associated with the device
 *
 **/
void blk_trace_shutdown(struct request_queue *q)
{
        if (q->blk_trace) {
                blk_trace_startstop(q, 0);
                blk_trace_remove(q);
        }
}

/*
 * blktrace probes
 */

/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @q:          queue the io is for
 * @rq:         the source request
 * @what:       the action
 *
 * Description:
 *     Records an action against a request. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
                             u32 what)
{
        struct blk_trace *bt = q->blk_trace;

        if (likely(!bt))
                return;

        if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
                what |= BLK_TC_ACT(BLK_TC_PC);
                __blk_add_trace(bt, 0, blk_rq_bytes(rq), rq->cmd_flags,
                                what, rq->errors, rq->cmd_len, rq->cmd);
        } else {
                what |= BLK_TC_ACT(BLK_TC_FS);
                __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
                                rq->cmd_flags, what, rq->errors, 0, NULL);
        }
}

static void blk_add_trace_rq_abort(void *ignore,
                                   struct request_queue *q, struct request *rq)
{
        blk_add_trace_rq(q, rq, BLK_TA_ABORT);
}

static void blk_add_trace_rq_insert(void *ignore,
                                    struct request_queue *q, struct request *rq)
{
        blk_add_trace_rq(q, rq, BLK_TA_INSERT);
}

static void blk_add_trace_rq_issue(void *ignore,
                                   struct request_queue *q, struct request *rq)
{
        blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
}

static void blk_add_trace_rq_requeue(void *ignore,
                                     struct request_queue *q,
                                     struct request *rq)
{
        blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
}

static void blk_add_trace_rq_complete(void *ignore,
                                      struct request_queue *q,
                                      struct request *rq)
{
        struct blk_trace *bt = q->blk_trace;

        /* if control ever passes through here, it's a request based driver */
        if (unlikely(bt && !bt->rq_based))
                bt->rq_based = true;

        blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
}

/**
 * blk_add_trace_bio - Add a trace for a bio oriented action
 * @q:          queue the io is for
 * @bio:        the source bio
 * @what:       the action
 * @error:      error, if any
 *
 * Description:
 *     Records an action against a bio. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
                              u32 what, int error)
{
        struct blk_trace *bt = q->blk_trace;

        if (likely(!bt))
                return;

        if (!error && !bio_flagged(bio, BIO_UPTODATE))
                error = EIO;

        __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
                        error, 0, NULL);
}

static void blk_add_trace_bio_bounce(void *ignore,
                                     struct request_queue *q, struct bio *bio)
{
        blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
}

static void blk_add_trace_bio_complete(void *ignore, struct bio *bio, int error)
{
        struct request_queue *q;
        struct blk_trace *bt;

        if (!bio->bi_bdev)
                return;

        q = bdev_get_queue(bio->bi_bdev);
        bt = q->blk_trace;

        /*
         * Request based drivers will generate both rq and bio completions.
         * Ignore bio ones.
         */
        if (likely(!bt) || bt->rq_based)
                return;

        blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);
}
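
/*
 * Illustrative timelines (not part of this file): on a request based
 * driver such as scsi, a write is traced as rq_issue -> rq_complete and
 * bt->rq_based turns the probe above into a no-op, so blkparse shows a
 * single 'C' event.  On a bio based driver (most dm targets), only the
 * block_bio_complete tracepoint fires, and this probe supplies the 'C'.
 */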

static void blk_add_trace_bio_backmerge(void *ignore,
                                        struct request_queue *q,
                                        struct bio *bio)
{
        blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0);
}

static void blk_add_trace_bio_frontmerge(void *ignore,
                                         struct request_queue *q,
                                         struct bio *bio)
{
        blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0);
}

static void blk_add_trace_bio_queue(void *ignore,
                                    struct request_queue *q, struct bio *bio)
{
        blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0);
}

static void blk_add_trace_getrq(void *ignore,
                                struct request_queue *q,
                                struct bio *bio, int rw)
{
        if (bio)
                blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
        else {
                struct blk_trace *bt = q->blk_trace;

                if (bt)
                        __blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL);
        }
}

static void blk_add_trace_sleeprq(void *ignore,
                                  struct request_queue *q,
                                  struct bio *bio, int rw)
{
        if (bio)
                blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
        else {
                struct blk_trace *bt = q->blk_trace;

                if (bt)
                        __blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ,
                                        0, 0, NULL);
        }
}

static void blk_add_trace_plug(void *ignore, struct request_queue *q)
{
        struct blk_trace *bt = q->blk_trace;

        if (bt)
                __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
}

static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
                                 unsigned int depth, bool explicit)
{
        struct blk_trace *bt = q->blk_trace;

        if (bt) {
                __be64 rpdu = cpu_to_be64(depth);
                u32 what;

                if (explicit)
                        what = BLK_TA_UNPLUG_IO;
                else
                        what = BLK_TA_UNPLUG_TIMER;

                __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
        }
}

static void blk_add_trace_split(void *ignore,
                                struct request_queue *q, struct bio *bio,
                                unsigned int pdu)
{
        struct blk_trace *bt = q->blk_trace;

        if (bt) {
                __be64 rpdu = cpu_to_be64(pdu);

                __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
                                BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE),
                                sizeof(rpdu), &rpdu);
        }
}

/**
 * blk_add_trace_bio_remap - Add a trace for a bio-remap operation
 * @ignore:     trace callback data parameter (not used)
 * @q:          queue the io is for
 * @bio:        the source bio
 * @dev:        target device
 * @from:       source sector
 *
 * Description:
 *     Device mapper or raid targets sometimes need to split a bio because
 *     it spans a stripe (or similar). Add a trace for that action.
 *
 **/
static void blk_add_trace_bio_remap(void *ignore,
                                    struct request_queue *q, struct bio *bio,
                                    dev_t dev, sector_t from)
{
        struct blk_trace *bt = q->blk_trace;
        struct blk_io_trace_remap r;

        if (likely(!bt))
                return;

        r.device_from = cpu_to_be32(dev);
        r.device_to   = cpu_to_be32(bio->bi_bdev->bd_dev);
        r.sector_from = cpu_to_be64(from);

        __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
                        BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE),
                        sizeof(r), &r);
}

/**
 * blk_add_trace_rq_remap - Add a trace for a request-remap operation
 * @ignore:     trace callback data parameter (not used)
 * @q:          queue the io is for
 * @rq:         the source request
 * @dev:        target device
 * @from:       source sector
 *
 * Description:
 *     Device mapper remaps requests to other devices.
 *     Add a trace for that action.
 *
 **/
static void blk_add_trace_rq_remap(void *ignore,
                                   struct request_queue *q,
                                   struct request *rq, dev_t dev,
                                   sector_t from)
{
        struct blk_trace *bt = q->blk_trace;
        struct blk_io_trace_remap r;

        if (likely(!bt))
                return;

        r.device_from = cpu_to_be32(dev);
        r.device_to   = cpu_to_be32(disk_devt(rq->rq_disk));
        r.sector_from = cpu_to_be64(from);

        __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
                        rq_data_dir(rq), BLK_TA_REMAP, !!rq->errors,
                        sizeof(r), &r);
}

/**
 * blk_add_driver_data - Add binary message with driver-specific data
 * @q:          queue the io is for
 * @rq:         io request
 * @data:       driver-specific data
 * @len:        length of driver-specific data
 *
 * Description:
 *     Some drivers might want to write driver-specific data per request.
 *
 **/
void blk_add_driver_data(struct request_queue *q,
                         struct request *rq,
                         void *data, size_t len)
{
        struct blk_trace *bt = q->blk_trace;

        if (likely(!bt))
                return;

        if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
                __blk_add_trace(bt, 0, blk_rq_bytes(rq), 0,
                                BLK_TA_DRV_DATA, rq->errors, len, data);
        else
                __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 0,
                                BLK_TA_DRV_DATA, rq->errors, len, data);
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);

static void blk_register_tracepoints(void)
{
        int ret;

        ret = register_trace_block_rq_abort(blk_add_trace_rq_abort, NULL);
        WARN_ON(ret);
        ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
        WARN_ON(ret);
        ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
        WARN_ON(ret);
        ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
        WARN_ON(ret);
        ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
        WARN_ON(ret);
        ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
        WARN_ON(ret);
        ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
        WARN_ON(ret);
        ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
        WARN_ON(ret);
        ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
        WARN_ON(ret);
        ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
        WARN_ON(ret);
        ret = register_trace_block_getrq(blk_add_trace_getrq, NULL);
        WARN_ON(ret);
        ret = register_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
        WARN_ON(ret);
        ret = register_trace_block_plug(blk_add_trace_plug, NULL);
        WARN_ON(ret);
        ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
        WARN_ON(ret);
        ret = register_trace_block_split(blk_add_trace_split, NULL);
        WARN_ON(ret);
        ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
        WARN_ON(ret);
        ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
        WARN_ON(ret);
}

static void blk_unregister_tracepoints(void)
{
        unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
        unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
        unregister_trace_block_split(blk_add_trace_split, NULL);
        unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
        unregister_trace_block_plug(blk_add_trace_plug, NULL);
        unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
        unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
        unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
        unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
        unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
        unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
        unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
        unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
        unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
        unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
        unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
        unregister_trace_block_rq_abort(blk_add_trace_rq_abort, NULL);

        tracepoint_synchronize_unregister();
}

/*
 * struct blk_io_tracer formatting routines
 */

static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
{
        int i = 0;
        int tc = t->action >> BLK_TC_SHIFT;

        if (t->action == BLK_TN_MESSAGE) {
                rwbs[i++] = 'N';
                goto out;
        }

        if (tc & BLK_TC_FLUSH)
                rwbs[i++] = 'F';

        if (tc & BLK_TC_DISCARD)
                rwbs[i++] = 'D';
        else if (tc & BLK_TC_WRITE)
                rwbs[i++] = 'W';
        else if (t->bytes)
                rwbs[i++] = 'R';
        else
                rwbs[i++] = 'N';

        if (tc & BLK_TC_FUA)
                rwbs[i++] = 'F';
        if (tc & BLK_TC_AHEAD)
                rwbs[i++] = 'A';
        if (tc & BLK_TC_SYNC)
                rwbs[i++] = 'S';
        if (tc & BLK_TC_META)
                rwbs[i++] = 'M';
out:
        rwbs[i] = '\0';
}
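
/*
 * Worked example (illustrative): for a sync write with FUA set, tc
 * carries BLK_TC_WRITE | BLK_TC_SYNC | BLK_TC_FUA, so the logic above
 * emits "WFS": 'W' for the direction, then 'F' (FUA) and 'S' (sync) in
 * the fixed modifier order tested above.
 */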

static inline
const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
{
        return (const struct blk_io_trace *)ent;
}

static inline const void *pdu_start(const struct trace_entry *ent)
{
        return te_blk_io_trace(ent) + 1;
}

static inline u32 t_action(const struct trace_entry *ent)
{
        return te_blk_io_trace(ent)->action;
}

static inline u32 t_bytes(const struct trace_entry *ent)
{
        return te_blk_io_trace(ent)->bytes;
}

static inline u32 t_sec(const struct trace_entry *ent)
{
        return te_blk_io_trace(ent)->bytes >> 9;
}

static inline unsigned long long t_sector(const struct trace_entry *ent)
{
        return te_blk_io_trace(ent)->sector;
}

static inline __u16 t_error(const struct trace_entry *ent)
{
        return te_blk_io_trace(ent)->error;
}

static __u64 get_pdu_int(const struct trace_entry *ent)
{
        const __u64 *val = pdu_start(ent);
        return be64_to_cpu(*val);
}

static void get_pdu_remap(const struct trace_entry *ent,
                          struct blk_io_trace_remap *r)
{
        const struct blk_io_trace_remap *__r = pdu_start(ent);
        __u64 sector_from = __r->sector_from;

        r->device_from = be32_to_cpu(__r->device_from);
        r->device_to   = be32_to_cpu(__r->device_to);
        r->sector_from = be64_to_cpu(sector_from);
}

typedef int (blk_log_action_t) (struct trace_iterator *iter, const char *act);

static int blk_log_action_classic(struct trace_iterator *iter, const char *act)
{
        char rwbs[RWBS_LEN];
        unsigned long long ts  = iter->ts;
        unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
        unsigned secs          = (unsigned long)ts;
        const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

        fill_rwbs(rwbs, t);

        return trace_seq_printf(&iter->seq,
                                "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
                                MAJOR(t->device), MINOR(t->device), iter->cpu,
                                secs, nsec_rem, iter->ent->pid, act, rwbs);
}
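
/*
 * Example output (illustrative values): with the classic format above, a
 * sync write completion on (8,0) might render as
 *
 *        8,0    2     5.123456789  4087  C  WS 1024 + 8 [0]
 *
 * i.e. major,minor, cpu, seconds.nanoseconds, pid, action and rwbs,
 * followed by the per-action payload added by the blk_log_*() helpers.
 */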

static int blk_log_action(struct trace_iterator *iter, const char *act)
{
        char rwbs[RWBS_LEN];
        const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

        fill_rwbs(rwbs, t);
        return trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
                                MAJOR(t->device), MINOR(t->device), act, rwbs);
}

static int blk_log_dump_pdu(struct trace_seq *s, const struct trace_entry *ent)
{
        const unsigned char *pdu_buf;
        int pdu_len;
        int i, end, ret;

        pdu_buf = pdu_start(ent);
        pdu_len = te_blk_io_trace(ent)->pdu_len;

        if (!pdu_len)
                return 1;

        /* find the last non-zero byte; trailing zeroes are elided below */
        for (end = pdu_len - 1; end >= 0; end--)
                if (pdu_buf[end])
                        break;
        end++;

        if (!trace_seq_putc(s, '('))
                return 0;

        for (i = 0; i < pdu_len; i++) {

                ret = trace_seq_printf(s, "%s%02x",
                                       i == 0 ? "" : " ", pdu_buf[i]);
                if (!ret)
                        return ret;

                /*
                 * stop when the rest is just zeroes and indicate so
                 * with a ".." appended
                 */
                if (i == end && end != pdu_len - 1)
                        return trace_seq_puts(s, " ..) ");
        }

        return trace_seq_puts(s, ") ");
}

static int blk_log_generic(struct trace_seq *s, const struct trace_entry *ent)
{
        char cmd[TASK_COMM_LEN];

        trace_find_cmdline(ent->pid, cmd);

        if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
                int ret;

                ret = trace_seq_printf(s, "%u ", t_bytes(ent));
                if (!ret)
                        return 0;
                ret = blk_log_dump_pdu(s, ent);
                if (!ret)
                        return 0;
                return trace_seq_printf(s, "[%s]\n", cmd);
        } else {
                if (t_sec(ent))
                        return trace_seq_printf(s, "%llu + %u [%s]\n",
                                                t_sector(ent), t_sec(ent), cmd);
                return trace_seq_printf(s, "[%s]\n", cmd);
        }
}

static int blk_log_with_error(struct trace_seq *s,
                              const struct trace_entry *ent)
{
        if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
                int ret;

                ret = blk_log_dump_pdu(s, ent);
                if (ret)
                        return trace_seq_printf(s, "[%d]\n", t_error(ent));
                return 0;
        } else {
                if (t_sec(ent))
                        return trace_seq_printf(s, "%llu + %u [%d]\n",
                                                t_sector(ent),
                                                t_sec(ent), t_error(ent));
                return trace_seq_printf(s, "%llu [%d]\n",
                                        t_sector(ent), t_error(ent));
        }
}

static int blk_log_remap(struct trace_seq *s, const struct trace_entry *ent)
{
        struct blk_io_trace_remap r = { .device_from = 0, };

        get_pdu_remap(ent, &r);
        return trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
                                t_sector(ent), t_sec(ent),
                                MAJOR(r.device_from), MINOR(r.device_from),
                                (unsigned long long)r.sector_from);
}

static int blk_log_plug(struct trace_seq *s, const struct trace_entry *ent)
{
        char cmd[TASK_COMM_LEN];

        trace_find_cmdline(ent->pid, cmd);

        return trace_seq_printf(s, "[%s]\n", cmd);
}

static int blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent)
{
        char cmd[TASK_COMM_LEN];

        trace_find_cmdline(ent->pid, cmd);

        return trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent));
}

static int blk_log_split(struct trace_seq *s, const struct trace_entry *ent)
{
        char cmd[TASK_COMM_LEN];

        trace_find_cmdline(ent->pid, cmd);

        return trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
                                get_pdu_int(ent), cmd);
}

static int blk_log_msg(struct trace_seq *s, const struct trace_entry *ent)
{
        int ret;
        const struct blk_io_trace *t = te_blk_io_trace(ent);

        ret = trace_seq_putmem(s, t + 1, t->pdu_len);
        if (ret)
                return trace_seq_putc(s, '\n');
        return ret;
}

/*
 * struct tracer operations
 */

static void blk_tracer_print_header(struct seq_file *m)
{
        if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
                return;
        seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
                    "#  |     |     |           |   |   |\n");
}

static void blk_tracer_start(struct trace_array *tr)
{
        blk_tracer_enabled = true;
}

static int blk_tracer_init(struct trace_array *tr)
{
        blk_tr = tr;
        blk_tracer_start(tr);
        return 0;
}

static void blk_tracer_stop(struct trace_array *tr)
{
        blk_tracer_enabled = false;
}

static void blk_tracer_reset(struct trace_array *tr)
{
        blk_tracer_stop(tr);
}

static const struct {
        const char *act[2];
        int        (*print)(struct trace_seq *s, const struct trace_entry *ent);
} what2act[] = {
        [__BLK_TA_QUEUE]        = {{  "Q", "queue" },      blk_log_generic },
        [__BLK_TA_BACKMERGE]    = {{  "M", "backmerge" },  blk_log_generic },
        [__BLK_TA_FRONTMERGE]   = {{  "F", "frontmerge" }, blk_log_generic },
        [__BLK_TA_GETRQ]        = {{  "G", "getrq" },      blk_log_generic },
        [__BLK_TA_SLEEPRQ]      = {{  "S", "sleeprq" },    blk_log_generic },
        [__BLK_TA_REQUEUE]      = {{  "R", "requeue" },    blk_log_with_error },
        [__BLK_TA_ISSUE]        = {{  "D", "issue" },      blk_log_generic },
        [__BLK_TA_COMPLETE]     = {{  "C", "complete" },   blk_log_with_error },
        [__BLK_TA_PLUG]         = {{  "P", "plug" },       blk_log_plug },
        [__BLK_TA_UNPLUG_IO]    = {{  "U", "unplug_io" },  blk_log_unplug },
        [__BLK_TA_UNPLUG_TIMER] = {{ "UT", "unplug_timer" }, blk_log_unplug },
        [__BLK_TA_INSERT]       = {{  "I", "insert" },     blk_log_generic },
        [__BLK_TA_SPLIT]        = {{  "X", "split" },      blk_log_split },
        [__BLK_TA_BOUNCE]       = {{  "B", "bounce" },     blk_log_generic },
        [__BLK_TA_REMAP]        = {{  "A", "remap" },      blk_log_remap },
};

static enum print_line_t print_one_line(struct trace_iterator *iter,
                                        bool classic)
{
        struct trace_seq *s = &iter->seq;
        const struct blk_io_trace *t;
        u16 what;
        int ret;
        bool long_act;
        blk_log_action_t *log_action;

        t          = te_blk_io_trace(iter->ent);
        what       = t->action & ((1 << BLK_TC_SHIFT) - 1);
        long_act   = !!(trace_flags & TRACE_ITER_VERBOSE);
        log_action = classic ? &blk_log_action_classic : &blk_log_action;

        if (t->action == BLK_TN_MESSAGE) {
                ret = log_action(iter, long_act ? "message" : "m");
                if (ret)
                        ret = blk_log_msg(s, iter->ent);
                goto out;
        }

        if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
                ret = trace_seq_printf(s, "Unknown action %x\n", what);
        else {
                ret = log_action(iter, what2act[what].act[long_act]);
                if (ret)
                        ret = what2act[what].print(s, iter->ent);
        }
out:
        return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
                                               int flags, struct trace_event *event)
{
        return print_one_line(iter, false);
}

static int blk_trace_synthesize_old_trace(struct trace_iterator *iter)
{
        struct trace_seq *s = &iter->seq;
        struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
        const int offset = offsetof(struct blk_io_trace, sector);
        struct blk_io_trace old = {
                .magic    = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
                .time     = iter->ts,
        };

        if (!trace_seq_putmem(s, &old, offset))
                return 0;
        return trace_seq_putmem(s, &t->sector,
                                sizeof(old) - offset + t->pdu_len);
}

static enum print_line_t
blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
                             struct trace_event *event)
{
        return blk_trace_synthesize_old_trace(iter) ?
                        TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
{
        if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
                return TRACE_TYPE_UNHANDLED;

        return print_one_line(iter, true);
}

static int blk_tracer_set_flag(u32 old_flags, u32 bit, int set)
{
        /* don't output context-info for blk_classic output */
        if (bit == TRACE_BLK_OPT_CLASSIC) {
                if (set)
                        trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
                else
                        trace_flags |= TRACE_ITER_CONTEXT_INFO;
        }
        return 0;
}

static struct tracer blk_tracer __read_mostly = {
        .name           = "blk",
        .init           = blk_tracer_init,
        .reset          = blk_tracer_reset,
        .start          = blk_tracer_start,
        .stop           = blk_tracer_stop,
        .print_header   = blk_tracer_print_header,
        .print_line     = blk_tracer_print_line,
        .flags          = &blk_tracer_flags,
        .set_flag       = blk_tracer_set_flag,
};

static struct trace_event_functions trace_blk_event_funcs = {
        .trace          = blk_trace_event_print,
        .binary         = blk_trace_event_print_binary,
};

static struct trace_event trace_blk_event = {
        .type           = TRACE_BLK,
        .funcs          = &trace_blk_event_funcs,
};

static int __init init_blk_tracer(void)
{
        if (!register_ftrace_event(&trace_blk_event)) {
                pr_warning("Warning: could not register block events\n");
                return 1;
        }

        if (register_tracer(&blk_tracer) != 0) {
                pr_warning("Warning: could not register the block tracer\n");
                unregister_ftrace_event(&trace_blk_event);
                return 1;
        }

        return 0;
}

device_initcall(init_blk_tracer);

static int blk_trace_remove_queue(struct request_queue *q)
{
        struct blk_trace *bt;

        bt = xchg(&q->blk_trace, NULL);
        if (bt == NULL)
                return -EINVAL;

        if (atomic_dec_and_test(&blk_probes_ref))
                blk_unregister_tracepoints();

        blk_trace_free(bt);
        return 0;
}

/*
 * Setup everything required to start tracing
 */
static int blk_trace_setup_queue(struct request_queue *q,
                                 struct block_device *bdev)
{
        struct blk_trace *old_bt, *bt = NULL;
        int ret = -ENOMEM;

        bt = kzalloc(sizeof(*bt), GFP_KERNEL);
        if (!bt)
                return -ENOMEM;

        bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
        if (!bt->msg_data)
                goto free_bt;

        bt->dev = bdev->bd_dev;
        bt->act_mask = (u16)-1;

        blk_trace_setup_lba(bt, bdev);

        old_bt = xchg(&q->blk_trace, bt);
        if (old_bt != NULL) {
                (void)xchg(&q->blk_trace, old_bt);
                ret = -EBUSY;
                goto free_bt;
        }

        if (atomic_inc_return(&blk_probes_ref) == 1)
                blk_register_tracepoints();
        return 0;

free_bt:
        blk_trace_free(bt);
        return ret;
}

/*
 * sysfs interface to enable and configure tracing
 */

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf);
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
                                          struct device_attribute *attr,
                                          const char *buf, size_t count);
#define BLK_TRACE_DEVICE_ATTR(_name) \
        DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
                    sysfs_blk_trace_attr_show, \
                    sysfs_blk_trace_attr_store)

static BLK_TRACE_DEVICE_ATTR(enable);
static BLK_TRACE_DEVICE_ATTR(act_mask);
static BLK_TRACE_DEVICE_ATTR(pid);
static BLK_TRACE_DEVICE_ATTR(start_lba);
static BLK_TRACE_DEVICE_ATTR(end_lba);

static struct attribute *blk_trace_attrs[] = {
        &dev_attr_enable.attr,
        &dev_attr_act_mask.attr,
        &dev_attr_pid.attr,
        &dev_attr_start_lba.attr,
        &dev_attr_end_lba.attr,
        NULL
};

struct attribute_group blk_trace_attr_group = {
        .name  = "trace",
        .attrs = blk_trace_attrs,
};

static const struct {
        int mask;
        const char *str;
} mask_maps[] = {
        { BLK_TC_READ,          "read"          },
        { BLK_TC_WRITE,         "write"         },
        { BLK_TC_FLUSH,         "flush"         },
        { BLK_TC_SYNC,          "sync"          },
        { BLK_TC_QUEUE,         "queue"         },
        { BLK_TC_REQUEUE,       "requeue"       },
        { BLK_TC_ISSUE,         "issue"         },
        { BLK_TC_COMPLETE,      "complete"      },
        { BLK_TC_FS,            "fs"            },
        { BLK_TC_PC,            "pc"            },
        { BLK_TC_AHEAD,         "ahead"         },
        { BLK_TC_META,          "meta"          },
        { BLK_TC_DISCARD,       "discard"       },
        { BLK_TC_DRV_DATA,      "drv_data"      },
        { BLK_TC_FUA,           "fua"           },
};

static int blk_trace_str2mask(const char *str)
{
        int i;
        int mask = 0;
        char *buf, *s, *token;

        buf = kstrdup(str, GFP_KERNEL);
        if (buf == NULL)
                return -ENOMEM;
        s = strstrip(buf);

        while (1) {
                token = strsep(&s, ",");
                if (token == NULL)
                        break;

                if (*token == '\0')
                        continue;

                for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
                        if (strcasecmp(token, mask_maps[i].str) == 0) {
                                mask |= mask_maps[i].mask;
                                break;
                        }
                }
                if (i == ARRAY_SIZE(mask_maps)) {
                        mask = -EINVAL;
                        break;
                }
        }
        kfree(buf);

        return mask;
}

static ssize_t blk_trace_mask2str(char *buf, int mask)
{
        int i;
        char *p = buf;

        for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
                if (mask & mask_maps[i].mask) {
                        p += sprintf(p, "%s%s",
                                    (p == buf) ? "" : ",", mask_maps[i].str);
                }
        }
        *p++ = '\n';

        return p - buf;
}
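
/*
 * Usage sketch (assumed sysfs paths): these conversions back the
 * per-partition trace attributes defined above, so tracing can be driven
 * without the ioctl interface at all, e.g.
 *
 *      echo read,write,sync > /sys/block/sda/sda1/trace/act_mask
 *      echo 1 > /sys/block/sda/sda1/trace/enable
 *      cat /sys/block/sda/sda1/trace/act_mask
 */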

static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
{
        if (bdev->bd_disk == NULL)
                return NULL;

        return bdev_get_queue(bdev);
}

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        struct hd_struct *p = dev_to_part(dev);
        struct request_queue *q;
        struct block_device *bdev;
        ssize_t ret = -ENXIO;

        bdev = bdget(part_devt(p));
        if (bdev == NULL)
                goto out;

        q = blk_trace_get_queue(bdev);
        if (q == NULL)
                goto out_bdput;

        mutex_lock(&bdev->bd_mutex);

        if (attr == &dev_attr_enable) {
                ret = sprintf(buf, "%u\n", !!q->blk_trace);
                goto out_unlock_bdev;
        }

        if (q->blk_trace == NULL)
                ret = sprintf(buf, "disabled\n");
        else if (attr == &dev_attr_act_mask)
                ret = blk_trace_mask2str(buf, q->blk_trace->act_mask);
        else if (attr == &dev_attr_pid)
                ret = sprintf(buf, "%u\n", q->blk_trace->pid);
        else if (attr == &dev_attr_start_lba)
                ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
        else if (attr == &dev_attr_end_lba)
                ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);

out_unlock_bdev:
        mutex_unlock(&bdev->bd_mutex);
out_bdput:
        bdput(bdev);
out:
        return ret;
}

static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
                                          struct device_attribute *attr,
                                          const char *buf, size_t count)
{
        struct block_device *bdev;
        struct request_queue *q;
        struct hd_struct *p;
        u64 value;
        ssize_t ret = -EINVAL;

        if (count == 0)
                goto out;

        if (attr == &dev_attr_act_mask) {
                if (sscanf(buf, "%llx", &value) != 1) {
                        /* Assume it is a list of trace category names */
                        ret = blk_trace_str2mask(buf);
                        if (ret < 0)
                                goto out;
                        value = ret;
                }
        } else if (sscanf(buf, "%llu", &value) != 1)
                goto out;

        ret = -ENXIO;

        p = dev_to_part(dev);
        bdev = bdget(part_devt(p));
        if (bdev == NULL)
                goto out;

        q = blk_trace_get_queue(bdev);
        if (q == NULL)
                goto out_bdput;

        mutex_lock(&bdev->bd_mutex);

        if (attr == &dev_attr_enable) {
                if (value)
                        ret = blk_trace_setup_queue(q, bdev);
                else
                        ret = blk_trace_remove_queue(q);
                goto out_unlock_bdev;
        }

        ret = 0;
        if (q->blk_trace == NULL)
                ret = blk_trace_setup_queue(q, bdev);

        if (ret == 0) {
                if (attr == &dev_attr_act_mask)
                        q->blk_trace->act_mask = value;
                else if (attr == &dev_attr_pid)
                        q->blk_trace->pid = value;
                else if (attr == &dev_attr_start_lba)
                        q->blk_trace->start_lba = value;
                else if (attr == &dev_attr_end_lba)
                        q->blk_trace->end_lba = value;
        }

out_unlock_bdev:
        mutex_unlock(&bdev->bd_mutex);
out_bdput:
        bdput(bdev);
out:
        return ret ? ret : count;
}

int blk_trace_init_sysfs(struct device *dev)
{
        return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
}

void blk_trace_remove_sysfs(struct device *dev)
{
        sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
}

#endif /* CONFIG_BLK_DEV_IO_TRACE */

#ifdef CONFIG_EVENT_TRACING

void blk_dump_cmd(char *buf, struct request *rq)
{
        int i, end;
        int len = rq->cmd_len;
        unsigned char *cmd = rq->cmd;

        if (rq->cmd_type != REQ_TYPE_BLOCK_PC) {
                buf[0] = '\0';
                return;
        }

        for (end = len - 1; end >= 0; end--)
                if (cmd[end])
                        break;
        end++;

        for (i = 0; i < len; i++) {
                buf += sprintf(buf, "%s%02x", i == 0 ? "" : " ", cmd[i]);
                if (i == end && end != len - 1) {
                        sprintf(buf, " ..");
                        break;
                }
        }
}

void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
{
        int i = 0;

        if (rw & REQ_FLUSH)
                rwbs[i++] = 'F';

        if (rw & WRITE)
                rwbs[i++] = 'W';
        else if (rw & REQ_DISCARD)
                rwbs[i++] = 'D';
        else if (bytes)
                rwbs[i++] = 'R';
        else
                rwbs[i++] = 'N';

        if (rw & REQ_FUA)
                rwbs[i++] = 'F';
        if (rw & REQ_RAHEAD)
                rwbs[i++] = 'A';
        if (rw & REQ_SYNC)
                rwbs[i++] = 'S';
        if (rw & REQ_META)
                rwbs[i++] = 'M';
        if (rw & REQ_SECURE)
                rwbs[i++] = 'E';

        rwbs[i] = '\0';
}
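
/*
 * Worked example (illustrative): blk_fill_rwbs(rwbs, WRITE | REQ_SYNC |
 * REQ_FUA, 4096) yields "WFS": 'W' for the write, then the FUA and sync
 * modifiers in the order tested above.  A zero-byte request with no
 * direction or flags yields just "N".
 */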

#endif /* CONFIG_EVENT_TRACING */