kernel/trace/trace.c
1 /*
2  * ring buffer based function tracer
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally taken from the RT patch by:
8  *    Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code from the latency_tracer, that is:
11  *  Copyright (C) 2004-2006 Ingo Molnar
12  *  Copyright (C) 2004 William Lee Irwin III
13  */
14 #include <linux/ring_buffer.h>
15 #include <generated/utsrelease.h>
16 #include <linux/stacktrace.h>
17 #include <linux/writeback.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/smp_lock.h>
21 #include <linux/notifier.h>
22 #include <linux/irqflags.h>
23 #include <linux/debugfs.h>
24 #include <linux/pagemap.h>
25 #include <linux/hardirq.h>
26 #include <linux/linkage.h>
27 #include <linux/uaccess.h>
28 #include <linux/kprobes.h>
29 #include <linux/ftrace.h>
30 #include <linux/module.h>
31 #include <linux/percpu.h>
32 #include <linux/splice.h>
33 #include <linux/kdebug.h>
34 #include <linux/string.h>
35 #include <linux/rwsem.h>
36 #include <linux/slab.h>
37 #include <linux/ctype.h>
38 #include <linux/init.h>
39 #include <linux/poll.h>
40 #include <linux/fs.h>
41
42 #include "trace.h"
43 #include "trace_output.h"
44
45 #define TRACE_BUFFER_FLAGS      (RB_FL_OVERWRITE)
46
47 /*
48  * On boot up, the ring buffer is set to the minimum size, so that
49  * we do not waste memory on systems that are not using tracing.
50  */
51 int ring_buffer_expanded;
52
53 /*
54  * We need to change this state when a selftest is running.
55  * A selftest will look into the ring buffer to count the
56  * entries inserted during the selftest, although concurrent
57  * insertions into the ring buffer, such as trace_printk, could occur
58  * at the same time, giving false positive or negative results.
59  */
60 static bool __read_mostly tracing_selftest_running;
61
62 /*
63  * If a tracer is running, we do not want to run SELFTEST.
64  */
65 bool __read_mostly tracing_selftest_disabled;
66
67 /* For tracers that don't implement custom flags */
68 static struct tracer_opt dummy_tracer_opt[] = {
69         { }
70 };
71
72 static struct tracer_flags dummy_tracer_flags = {
73         .val = 0,
74         .opts = dummy_tracer_opt
75 };
76
77 static int dummy_set_flag(u32 old_flags, u32 bit, int set)
78 {
79         return 0;
80 }
81
82 /*
83  * Kill all tracing for good (never come back).
84  * It is initialized to 1 but will turn to zero if the initialization
85  * of the tracer is successful. But that is the only place that sets
86  * this back to zero.
87  */
88 static int tracing_disabled = 1;
89
90 DEFINE_PER_CPU(int, ftrace_cpu_disabled);
91
92 static inline void ftrace_disable_cpu(void)
93 {
94         preempt_disable();
95         __this_cpu_inc(ftrace_cpu_disabled);
96 }
97
98 static inline void ftrace_enable_cpu(void)
99 {
100         __this_cpu_dec(ftrace_cpu_disabled);
101         preempt_enable();
102 }
103
104 static cpumask_var_t __read_mostly      tracing_buffer_mask;
105
106 #define for_each_tracing_cpu(cpu)       \
107         for_each_cpu(cpu, tracing_buffer_mask)
108
109 /*
110  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
111  *
112  * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
113  * is set, then ftrace_dump is called. This will output the contents
114  * of the ftrace buffers to the console.  This is very useful for
115  * capturing traces that lead to crashes and outputting them to a
116  * serial console.
117  *
118  * It is off by default, but you can enable it either by specifying
119  * "ftrace_dump_on_oops" on the kernel command line, or by setting
120  * /proc/sys/kernel/ftrace_dump_on_oops:
121  * set it to 1 to dump the buffers of all CPUs, or
122  * set it to 2 to dump only the buffer of the CPU that triggered the oops.
123  */
124
125 enum ftrace_dump_mode ftrace_dump_on_oops;
126
127 static int tracing_set_tracer(const char *buf);
128
129 #define MAX_TRACER_SIZE         100
130 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
131 static char *default_bootup_tracer;
132
133 static int __init set_cmdline_ftrace(char *str)
134 {
135         strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
136         default_bootup_tracer = bootup_tracer_buf;
137         /* We are using ftrace early, expand it */
138         ring_buffer_expanded = 1;
139         return 1;
140 }
141 __setup("ftrace=", set_cmdline_ftrace);
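/*
 * Example usage (sketch): booting with e.g. "ftrace=function" copies the name
 * into bootup_tracer_buf; register_tracer() later compares it against
 * default_bootup_tracer and starts that tracer ("function" is only an
 * illustration here, any registered tracer name works).
 */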
142
143 static int __init set_ftrace_dump_on_oops(char *str)
144 {
145         if (*str++ != '=' || !*str) {
146                 ftrace_dump_on_oops = DUMP_ALL;
147                 return 1;
148         }
149
150         if (!strcmp("orig_cpu", str)) {
151                 ftrace_dump_on_oops = DUMP_ORIG;
152                 return 1;
153         }
154
155         return 0;
156 }
157 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
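/*
 * Example usage (sketch): "ftrace_dump_on_oops" on the command line selects
 * DUMP_ALL, while "ftrace_dump_on_oops=orig_cpu" selects DUMP_ORIG; at run
 * time the same can be requested by writing 1 or 2 to
 * /proc/sys/kernel/ftrace_dump_on_oops, as described in the comment above.
 */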
158
159 unsigned long long ns2usecs(cycle_t nsec)
160 {
161         nsec += 500;
162         do_div(nsec, 1000);
163         return nsec;
164 }
165
166 /*
167  * The global_trace is the descriptor that holds the tracing
168  * buffers for the live tracing. For each CPU, it contains
169  * a linked list of pages that will store trace entries. The
170  * page descriptors of those pages are used to hold the linked
171  * list itself: the lru item in each page descriptor links the
172  * pages of that CPU's buffer together.
173  *
174  * For each active CPU there is a data field that holds the
175  * pages for the buffer for that CPU. Each CPU has the same number
176  * of pages allocated for its buffer.
177  */
178 static struct trace_array       global_trace;
179
180 static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
181
182 int filter_current_check_discard(struct ring_buffer *buffer,
183                                  struct ftrace_event_call *call, void *rec,
184                                  struct ring_buffer_event *event)
185 {
186         return filter_check_discard(call, rec, buffer, event);
187 }
188 EXPORT_SYMBOL_GPL(filter_current_check_discard);
189
190 cycle_t ftrace_now(int cpu)
191 {
192         u64 ts;
193
194         /* Early boot up does not have a buffer yet */
195         if (!global_trace.buffer)
196                 return trace_clock_local();
197
198         ts = ring_buffer_time_stamp(global_trace.buffer, cpu);
199         ring_buffer_normalize_time_stamp(global_trace.buffer, cpu, &ts);
200
201         return ts;
202 }
203
204 /*
205  * The max_tr is used to snapshot the global_trace when a maximum
206  * latency is reached. Some tracers will use this to store a maximum
207  * trace while they continue examining live traces.
208  *
209  * The buffers for the max_tr are set up the same as the global_trace.
210  * When a snapshot is taken, the linked list of the max_tr is swapped
211  * with the linked list of the global_trace and the buffers of the
212  * global_trace are reset so that tracing can continue.
213  */
214 static struct trace_array       max_tr;
215
216 static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
217
218 /* tracer_enabled is used to toggle activation of a tracer */
219 static int                      tracer_enabled = 1;
220
221 /**
222  * tracing_is_enabled - return tracer_enabled status
223  *
224  * This function is used by other tracers to know the status
225  * of the tracer_enabled flag.  Tracers may use this function
226  * to know whether they should enable their features when starting
227  * up. See the irqsoff tracer for an example (start_irqsoff_tracer).
228  */
229 int tracing_is_enabled(void)
230 {
231         return tracer_enabled;
232 }
233
234 /*
235  * trace_buf_size is the size in bytes that is allocated
236  * for a buffer. Note, the number of bytes is always rounded
237  * to page size.
238  *
239  * This number is purposely set to a low value of 16384 entries:
240  * if a dump on oops happens, it is much appreciated not having
241  * to wait for all that output. In any case, the size is
242  * configurable at both boot time and run time.
243  */
244 #define TRACE_BUF_SIZE_DEFAULT  1441792UL /* 16384 * 88 (sizeof(entry)) */
245
246 static unsigned long            trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
247
248 /* trace_types holds a link list of available tracers. */
249 static struct tracer            *trace_types __read_mostly;
250
251 /* current_trace points to the tracer that is currently active */
252 static struct tracer            *current_trace __read_mostly;
253
254 /*
255  * trace_types_lock is used to protect the trace_types list.
256  */
257 static DEFINE_MUTEX(trace_types_lock);
258
259 /*
260  * Serialize access to the ring buffer.
261  *
262  * The ring buffer serializes readers, but that is only low level protection.
263  * The validity of the events (returned by ring_buffer_peek() etc.)
264  * is not protected by the ring buffer itself.
265  *
266  * The content of events may become garbage if we allow another process to
267  * consume these events concurrently:
268  *   A) the page of the consumed events may become a normal page
269  *      (not a reader page) in the ring buffer, and this page will be
270  *      rewritten by the events producer.
271  *   B) the page of the consumed events may become a page for splice_read,
272  *      and this page will be returned to the system.
273  *
274  * These primitives allow concurrent access by multiple processes to the
275  * ring buffers of different cpus.
276  *
277  * These primitives do not distinguish read-only from read-consume access.
278  * Multiple read-only accesses are also serialized.
279  */
280
281 #ifdef CONFIG_SMP
282 static DECLARE_RWSEM(all_cpu_access_lock);
283 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
284
285 static inline void trace_access_lock(int cpu)
286 {
287         if (cpu == TRACE_PIPE_ALL_CPU) {
288                 /* gain it for accessing the whole ring buffer. */
289                 down_write(&all_cpu_access_lock);
290         } else {
291                 /* gain it for accessing a cpu ring buffer. */
292
293                 /* Firstly block other trace_access_lock(TRACE_PIPE_ALL_CPU). */
294                 down_read(&all_cpu_access_lock);
295
296                 /* Secondly block other access to this @cpu ring buffer. */
297                 mutex_lock(&per_cpu(cpu_access_lock, cpu));
298         }
299 }
300
301 static inline void trace_access_unlock(int cpu)
302 {
303         if (cpu == TRACE_PIPE_ALL_CPU) {
304                 up_write(&all_cpu_access_lock);
305         } else {
306                 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
307                 up_read(&all_cpu_access_lock);
308         }
309 }
310
311 static inline void trace_access_lock_init(void)
312 {
313         int cpu;
314
315         for_each_possible_cpu(cpu)
316                 mutex_init(&per_cpu(cpu_access_lock, cpu));
317 }
318
319 #else
320
321 static DEFINE_MUTEX(access_lock);
322
323 static inline void trace_access_lock(int cpu)
324 {
325         (void)cpu;
326         mutex_lock(&access_lock);
327 }
328
329 static inline void trace_access_unlock(int cpu)
330 {
331         (void)cpu;
332         mutex_unlock(&access_lock);
333 }
334
335 static inline void trace_access_lock_init(void)
336 {
337 }
338
339 #endif
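/*
 * Illustrative sketch (not one of this file's real readers): a consumer of a
 * single cpu buffer brackets the read with the per-cpu lock, e.g.
 *
 *	trace_access_lock(cpu);
 *	event = ring_buffer_consume(global_trace.buffer, cpu, &ts, &lost_events);
 *	...
 *	trace_access_unlock(cpu);
 *
 * while an iterator over every buffer passes TRACE_PIPE_ALL_CPU instead,
 * which takes all_cpu_access_lock for writing on SMP.
 */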
340
341 /* trace_wait is a waitqueue for tasks blocked on trace_poll */
342 static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
343
344 /* trace_flags holds trace_options default values */
345 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
346         TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
347         TRACE_ITER_GRAPH_TIME;
348
349 static int trace_stop_count;
350 static DEFINE_SPINLOCK(tracing_start_lock);
351
352 /**
353  * trace_wake_up - wake up tasks waiting for trace input
354  *
355  * Simply wakes up any task that is blocked on the trace_wait
356  * queue. This is used with trace_poll for tasks polling the trace.
357  */
358 void trace_wake_up(void)
359 {
360         int cpu;
361
362         if (trace_flags & TRACE_ITER_BLOCK)
363                 return;
364         /*
365          * The runqueue_is_locked() can fail, but this is the best we
366          * have for now:
367          */
368         cpu = get_cpu();
369         if (!runqueue_is_locked(cpu))
370                 wake_up(&trace_wait);
371         put_cpu();
372 }
373
374 static int __init set_buf_size(char *str)
375 {
376         unsigned long buf_size;
377
378         if (!str)
379                 return 0;
380         buf_size = memparse(str, &str);
381         /* nr_entries can not be zero */
382         if (buf_size == 0)
383                 return 0;
384         trace_buf_size = buf_size;
385         return 1;
386 }
387 __setup("trace_buf_size=", set_buf_size);
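/*
 * Example usage (sketch): memparse() accepts the usual size suffixes
 * (K, M, G), so booting with e.g. "trace_buf_size=1M" requests roughly one
 * megabyte per cpu buffer (rounded to page size, as noted above).
 */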
388
389 static int __init set_tracing_thresh(char *str)
390 {
391         unsigned long threshold;
392         int ret;
393
394         if (!str)
395                 return 0;
396         ret = strict_strtoul(str, 0, &threshold);
397         if (ret < 0)
398                 return 0;
399         tracing_thresh = threshold * 1000;
400         return 1;
401 }
402 __setup("tracing_thresh=", set_tracing_thresh);
403
404 unsigned long nsecs_to_usecs(unsigned long nsecs)
405 {
406         return nsecs / 1000;
407 }
408
409 /* These must match the bit positions in trace_iterator_flags */
410 static const char *trace_options[] = {
411         "print-parent",
412         "sym-offset",
413         "sym-addr",
414         "verbose",
415         "raw",
416         "hex",
417         "bin",
418         "block",
419         "stacktrace",
420         "trace_printk",
421         "ftrace_preempt",
422         "branch",
423         "annotate",
424         "userstacktrace",
425         "sym-userobj",
426         "printk-msg-only",
427         "context-info",
428         "latency-format",
429         "sleep-time",
430         "graph-time",
431         NULL
432 };
433
434 static struct {
435         u64 (*func)(void);
436         const char *name;
437 } trace_clocks[] = {
438         { trace_clock_local,    "local" },
439         { trace_clock_global,   "global" },
440 };
441
442 int trace_clock_id;
443
444 /*
445  * trace_parser_get_init - gets the buffer for trace parser
446  */
447 int trace_parser_get_init(struct trace_parser *parser, int size)
448 {
449         memset(parser, 0, sizeof(*parser));
450
451         parser->buffer = kmalloc(size, GFP_KERNEL);
452         if (!parser->buffer)
453                 return 1;
454
455         parser->size = size;
456         return 0;
457 }
458
459 /*
460  * trace_parser_put - frees the buffer for trace parser
461  */
462 void trace_parser_put(struct trace_parser *parser)
463 {
464         kfree(parser->buffer);
465 }
466
467 /*
468  * trace_get_user - reads the user input string separated by space
469  * (matched by isspace(ch))
470  *
471  * For each string found the 'struct trace_parser' is updated,
472  * and the function returns.
473  *
474  * Returns number of bytes read.
475  *
476  * See kernel/trace/trace.h for 'struct trace_parser' details.
477  */
478 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
479         size_t cnt, loff_t *ppos)
480 {
481         char ch;
482         size_t read = 0;
483         ssize_t ret;
484
485         if (!*ppos)
486                 trace_parser_clear(parser);
487
488         ret = get_user(ch, ubuf++);
489         if (ret)
490                 goto out;
491
492         read++;
493         cnt--;
494
495         /*
496          * If the parser did not finish with the last write,
497          * continue reading the user input without skipping spaces.
498          */
499         if (!parser->cont) {
500                 /* skip white space */
501                 while (cnt && isspace(ch)) {
502                         ret = get_user(ch, ubuf++);
503                         if (ret)
504                                 goto out;
505                         read++;
506                         cnt--;
507                 }
508
509                 /* only spaces were written */
510                 if (isspace(ch)) {
511                         *ppos += read;
512                         ret = read;
513                         goto out;
514                 }
515
516                 parser->idx = 0;
517         }
518
519         /* read the non-space input */
520         while (cnt && !isspace(ch)) {
521                 if (parser->idx < parser->size - 1)
522                         parser->buffer[parser->idx++] = ch;
523                 else {
524                         ret = -EINVAL;
525                         goto out;
526                 }
527                 ret = get_user(ch, ubuf++);
528                 if (ret)
529                         goto out;
530                 read++;
531                 cnt--;
532         }
533
534         /* We either got finished input or we have to wait for another call. */
535         if (isspace(ch)) {
536                 parser->buffer[parser->idx] = 0;
537                 parser->cont = false;
538         } else {
539                 parser->cont = true;
540                 parser->buffer[parser->idx++] = ch;
541         }
542
543         *ppos += read;
544         ret = read;
545
546 out:
547         return ret;
548 }
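/*
 * Illustrative sketch of the expected calling pattern from a debugfs write()
 * handler (the real callers live in the ftrace filter code; the buffer size
 * and error handling here are only placeholders):
 *
 *	struct trace_parser parser;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser))
 *		... use the NUL-terminated word in parser.buffer ...
 *	trace_parser_put(&parser);
 *	return read;
 */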
549
550 ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
551 {
552         int len;
553         int ret;
554
555         if (!cnt)
556                 return 0;
557
558         if (s->len <= s->readpos)
559                 return -EBUSY;
560
561         len = s->len - s->readpos;
562         if (cnt > len)
563                 cnt = len;
564         ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
565         if (ret == cnt)
566                 return -EFAULT;
567
568         cnt -= ret;
569
570         s->readpos += cnt;
571         return cnt;
572 }
573
574 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
575 {
576         int len;
577         void *ret;
578
579         if (s->len <= s->readpos)
580                 return -EBUSY;
581
582         len = s->len - s->readpos;
583         if (cnt > len)
584                 cnt = len;
585         ret = memcpy(buf, s->buffer + s->readpos, cnt);
586         if (!ret)
587                 return -EFAULT;
588
589         s->readpos += cnt;
590         return cnt;
591 }
592
593 /*
594  * ftrace_max_lock is used to protect the swapping of buffers
595  * when taking a max snapshot. The buffers themselves are
596  * protected by per_cpu spinlocks. But the action of the swap
597  * needs its own lock.
598  *
599  * This is defined as a arch_spinlock_t in order to help
600  * with performance when lockdep debugging is enabled.
601  *
602  * It is also used in other places outside the update_max_tr
603  * so it needs to be defined outside of the
604  * CONFIG_TRACER_MAX_TRACE.
605  */
606 static arch_spinlock_t ftrace_max_lock =
607         (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
608
609 unsigned long __read_mostly     tracing_thresh;
610
611 #ifdef CONFIG_TRACER_MAX_TRACE
612 unsigned long __read_mostly     tracing_max_latency;
613
614 /*
615  * Copy the new maximum trace into the separate maximum-trace
616  * structure. (this way the maximum trace is permanently saved,
617  * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
618  */
619 static void
620 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
621 {
622         struct trace_array_cpu *data = tr->data[cpu];
623         struct trace_array_cpu *max_data;
624
625         max_tr.cpu = cpu;
626         max_tr.time_start = data->preempt_timestamp;
627
628         max_data = max_tr.data[cpu];
629         max_data->saved_latency = tracing_max_latency;
630         max_data->critical_start = data->critical_start;
631         max_data->critical_end = data->critical_end;
632
633         memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
634         max_data->pid = tsk->pid;
635         max_data->uid = task_uid(tsk);
636         max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
637         max_data->policy = tsk->policy;
638         max_data->rt_priority = tsk->rt_priority;
639
640         /* record this task's comm */
641         tracing_record_cmdline(tsk);
642 }
643
644 /**
645  * update_max_tr - snapshot all trace buffers from global_trace to max_tr
646  * @tr: tracer
647  * @tsk: the task with the latency
648  * @cpu: The cpu that initiated the trace.
649  *
650  * Flip the buffers between the @tr and the max_tr and record information
651  * about which task was the cause of this latency.
652  */
653 void
654 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
655 {
656         struct ring_buffer *buf = tr->buffer;
657
658         if (trace_stop_count)
659                 return;
660
661         WARN_ON_ONCE(!irqs_disabled());
662         arch_spin_lock(&ftrace_max_lock);
663
664         tr->buffer = max_tr.buffer;
665         max_tr.buffer = buf;
666
667         __update_max_tr(tr, tsk, cpu);
668         arch_spin_unlock(&ftrace_max_lock);
669 }
670
671 /**
672  * update_max_tr_single - only copy one trace over, and reset the rest
673  * @tr: tracer
674  * @tsk: task with the latency
675  * @cpu: the cpu of the buffer to copy.
676  *
677  * Flip the trace of a single CPU buffer between the @tr and the max_tr.
678  */
679 void
680 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
681 {
682         int ret;
683
684         if (trace_stop_count)
685                 return;
686
687         WARN_ON_ONCE(!irqs_disabled());
688         arch_spin_lock(&ftrace_max_lock);
689
690         ftrace_disable_cpu();
691
692         ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);
693
694         if (ret == -EBUSY) {
695                 /*
696                  * We failed to swap the buffer due to a commit taking
697                  * place on this CPU. We fail to record, but we reset
698                  * the max trace buffer (no one writes directly to it)
699                  * and flag that it failed.
700                  */
701                 trace_array_printk(&max_tr, _THIS_IP_,
702                         "Failed to swap buffers due to commit in progress\n");
703         }
704
705         ftrace_enable_cpu();
706
707         WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
708
709         __update_max_tr(tr, tsk, cpu);
710         arch_spin_unlock(&ftrace_max_lock);
711 }
712 #endif /* CONFIG_TRACER_MAX_TRACE */
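/*
 * Illustrative sketch of how a latency tracer (irqsoff, wakeup, ...) is
 * expected to use the helpers above once it measures a new worst case;
 * "delta" and "cpu" are hypothetical locals of the tracer:
 *
 *	if (delta > tracing_max_latency && !is_tracing_stopped()) {
 *		tracing_max_latency = delta;
 *		update_max_tr_single(tr, current, cpu);
 *	}
 */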
713
714 /**
715  * register_tracer - register a tracer with the ftrace system.
716  * @type: the plugin for the tracer
717  *
718  * Register a new plugin tracer.
719  */
720 int register_tracer(struct tracer *type)
721 __releases(kernel_lock)
722 __acquires(kernel_lock)
723 {
724         struct tracer *t;
725         int ret = 0;
726
727         if (!type->name) {
728                 pr_info("Tracer must have a name\n");
729                 return -1;
730         }
731
732         if (strlen(type->name) > MAX_TRACER_SIZE) {
733                 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
734                 return -1;
735         }
736
737         mutex_lock(&trace_types_lock);
738
739         tracing_selftest_running = true;
740
741         for (t = trace_types; t; t = t->next) {
742                 if (strcmp(type->name, t->name) == 0) {
743                         /* already found */
744                         pr_info("Tracer %s already registered\n",
745                                 type->name);
746                         ret = -1;
747                         goto out;
748                 }
749         }
750
751         if (!type->set_flag)
752                 type->set_flag = &dummy_set_flag;
753         if (!type->flags)
754                 type->flags = &dummy_tracer_flags;
755         else
756                 if (!type->flags->opts)
757                         type->flags->opts = dummy_tracer_opt;
758         if (!type->wait_pipe)
759                 type->wait_pipe = default_wait_pipe;
760
761
762 #ifdef CONFIG_FTRACE_STARTUP_TEST
763         if (type->selftest && !tracing_selftest_disabled) {
764                 struct tracer *saved_tracer = current_trace;
765                 struct trace_array *tr = &global_trace;
766
767                 /*
768                  * Run a selftest on this tracer.
769                  * Here we reset the trace buffer, and set the current
770                  * tracer to be this tracer. The tracer can then run some
771                  * internal tracing to verify that everything is in order.
772                  * If we fail, we do not register this tracer.
773                  */
774                 tracing_reset_online_cpus(tr);
775
776                 current_trace = type;
777                 /* the test is responsible for initializing and enabling */
778                 pr_info("Testing tracer %s: ", type->name);
779                 ret = type->selftest(type, tr);
780                 /* the test is responsible for resetting too */
781                 current_trace = saved_tracer;
782                 if (ret) {
783                         printk(KERN_CONT "FAILED!\n");
784                         goto out;
785                 }
786                 /* Only reset on passing, to avoid touching corrupted buffers */
787                 tracing_reset_online_cpus(tr);
788
789                 printk(KERN_CONT "PASSED\n");
790         }
791 #endif
792
793         type->next = trace_types;
794         trace_types = type;
795
796  out:
797         tracing_selftest_running = false;
798         mutex_unlock(&trace_types_lock);
799
800         if (ret || !default_bootup_tracer)
801                 goto out_unlock;
802
803         if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
804                 goto out_unlock;
805
806         printk(KERN_INFO "Starting tracer '%s'\n", type->name);
807         /* Do we want this tracer to start on bootup? */
808         tracing_set_tracer(type->name);
809         default_bootup_tracer = NULL;
810         /* disable other selftests, since this will break them. */
811         tracing_selftest_disabled = 1;
812 #ifdef CONFIG_FTRACE_STARTUP_TEST
813         printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
814                type->name);
815 #endif
816
817  out_unlock:
818         return ret;
819 }
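/*
 * Illustrative sketch of a minimal plugin tracer registration ("foo" and its
 * callbacks are hypothetical; see the nop tracer for a real minimal example):
 *
 *	static struct tracer foo_tracer __read_mostly = {
 *		.name	= "foo",
 *		.init	= foo_tracer_init,
 *		.reset	= foo_tracer_reset,
 *	};
 *
 *	static __init int init_foo_tracer(void)
 *	{
 *		return register_tracer(&foo_tracer);
 *	}
 *	device_initcall(init_foo_tracer);
 */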
820
821 void unregister_tracer(struct tracer *type)
822 {
823         struct tracer **t;
824
825         mutex_lock(&trace_types_lock);
826         for (t = &trace_types; *t; t = &(*t)->next) {
827                 if (*t == type)
828                         goto found;
829         }
830         pr_info("Tracer %s not registered\n", type->name);
831         goto out;
832
833  found:
834         *t = (*t)->next;
835
836         if (type == current_trace && tracer_enabled) {
837                 tracer_enabled = 0;
838                 tracing_stop();
839                 if (current_trace->stop)
840                         current_trace->stop(&global_trace);
841                 current_trace = &nop_trace;
842         }
843 out:
844         mutex_unlock(&trace_types_lock);
845 }
846
847 static void __tracing_reset(struct ring_buffer *buffer, int cpu)
848 {
849         ftrace_disable_cpu();
850         ring_buffer_reset_cpu(buffer, cpu);
851         ftrace_enable_cpu();
852 }
853
854 void tracing_reset(struct trace_array *tr, int cpu)
855 {
856         struct ring_buffer *buffer = tr->buffer;
857
858         ring_buffer_record_disable(buffer);
859
860         /* Make sure all commits have finished */
861         synchronize_sched();
862         __tracing_reset(buffer, cpu);
863
864         ring_buffer_record_enable(buffer);
865 }
866
867 void tracing_reset_online_cpus(struct trace_array *tr)
868 {
869         struct ring_buffer *buffer = tr->buffer;
870         int cpu;
871
872         ring_buffer_record_disable(buffer);
873
874         /* Make sure all commits have finished */
875         synchronize_sched();
876
877         tr->time_start = ftrace_now(tr->cpu);
878
879         for_each_online_cpu(cpu)
880                 __tracing_reset(buffer, cpu);
881
882         ring_buffer_record_enable(buffer);
883 }
884
885 void tracing_reset_current(int cpu)
886 {
887         tracing_reset(&global_trace, cpu);
888 }
889
890 void tracing_reset_current_online_cpus(void)
891 {
892         tracing_reset_online_cpus(&global_trace);
893 }
894
895 #define SAVED_CMDLINES 128
896 #define NO_CMDLINE_MAP UINT_MAX
897 static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
898 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
899 static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
900 static int cmdline_idx;
901 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
902
903 /* temporarily disable recording */
904 static atomic_t trace_record_cmdline_disabled __read_mostly;
905
906 static void trace_init_cmdlines(void)
907 {
908         memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
909         memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
910         cmdline_idx = 0;
911 }
912
913 int is_tracing_stopped(void)
914 {
915         return trace_stop_count;
916 }
917
918 /**
919  * ftrace_off_permanent - disable all ftrace code permanently
920  *
921  * This should only be called when a serious anomaly has
922  * been detected.  This will turn off function tracing,
923  * ring buffers, and other tracing utilities. It takes no
924  * locks and can be called from any context.
925  */
926 void ftrace_off_permanent(void)
927 {
928         tracing_disabled = 1;
929         ftrace_stop();
930         tracing_off_permanent();
931 }
932
933 /**
934  * tracing_start - quick start of the tracer
935  *
936  * If tracing is enabled but was stopped by tracing_stop,
937  * this will start the tracer back up.
938  */
939 void tracing_start(void)
940 {
941         struct ring_buffer *buffer;
942         unsigned long flags;
943
944         if (tracing_disabled)
945                 return;
946
947         spin_lock_irqsave(&tracing_start_lock, flags);
948         if (--trace_stop_count) {
949                 if (trace_stop_count < 0) {
950                         /* Someone screwed up their debugging */
951                         WARN_ON_ONCE(1);
952                         trace_stop_count = 0;
953                 }
954                 goto out;
955         }
956
957         /* Prevent the buffers from switching */
958         arch_spin_lock(&ftrace_max_lock);
959
960         buffer = global_trace.buffer;
961         if (buffer)
962                 ring_buffer_record_enable(buffer);
963
964         buffer = max_tr.buffer;
965         if (buffer)
966                 ring_buffer_record_enable(buffer);
967
968         arch_spin_unlock(&ftrace_max_lock);
969
970         ftrace_start();
971  out:
972         spin_unlock_irqrestore(&tracing_start_lock, flags);
973 }
974
975 /**
976  * tracing_stop - quick stop of the tracer
977  *
978  * Light weight way to stop tracing. Use in conjunction with
979  * tracing_start.
980  */
981 void tracing_stop(void)
982 {
983         struct ring_buffer *buffer;
984         unsigned long flags;
985
986         ftrace_stop();
987         spin_lock_irqsave(&tracing_start_lock, flags);
988         if (trace_stop_count++)
989                 goto out;
990
991         /* Prevent the buffers from switching */
992         arch_spin_lock(&ftrace_max_lock);
993
994         buffer = global_trace.buffer;
995         if (buffer)
996                 ring_buffer_record_disable(buffer);
997
998         buffer = max_tr.buffer;
999         if (buffer)
1000                 ring_buffer_record_disable(buffer);
1001
1002         arch_spin_unlock(&ftrace_max_lock);
1003
1004  out:
1005         spin_unlock_irqrestore(&tracing_start_lock, flags);
1006 }
1007
1008 void trace_stop_cmdline_recording(void);
1009
1010 static void trace_save_cmdline(struct task_struct *tsk)
1011 {
1012         unsigned pid, idx;
1013
1014         if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1015                 return;
1016
1017         /*
1018          * It's not the end of the world if we don't get
1019          * the lock, but we also don't want to spin
1020          * nor do we want to disable interrupts,
1021          * so if we miss here, then better luck next time.
1022          */
1023         if (!arch_spin_trylock(&trace_cmdline_lock))
1024                 return;
1025
1026         idx = map_pid_to_cmdline[tsk->pid];
1027         if (idx == NO_CMDLINE_MAP) {
1028                 idx = (cmdline_idx + 1) % SAVED_CMDLINES;
1029
1030                 /*
1031                  * Check whether the cmdline buffer at idx has a pid
1032                  * mapped. We are going to overwrite that entry so we
1033                  * need to clear the map_pid_to_cmdline. Otherwise we
1034                  * would read the new comm for the old pid.
1035                  */
1036                 pid = map_cmdline_to_pid[idx];
1037                 if (pid != NO_CMDLINE_MAP)
1038                         map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1039
1040                 map_cmdline_to_pid[idx] = tsk->pid;
1041                 map_pid_to_cmdline[tsk->pid] = idx;
1042
1043                 cmdline_idx = idx;
1044         }
1045
1046         memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
1047
1048         arch_spin_unlock(&trace_cmdline_lock);
1049 }
1050
1051 void trace_find_cmdline(int pid, char comm[])
1052 {
1053         unsigned map;
1054
1055         if (!pid) {
1056                 strcpy(comm, "<idle>");
1057                 return;
1058         }
1059
1060         if (WARN_ON_ONCE(pid < 0)) {
1061                 strcpy(comm, "<XXX>");
1062                 return;
1063         }
1064
1065         if (pid > PID_MAX_DEFAULT) {
1066                 strcpy(comm, "<...>");
1067                 return;
1068         }
1069
1070         preempt_disable();
1071         arch_spin_lock(&trace_cmdline_lock);
1072         map = map_pid_to_cmdline[pid];
1073         if (map != NO_CMDLINE_MAP)
1074                 strcpy(comm, saved_cmdlines[map]);
1075         else
1076                 strcpy(comm, "<...>");
1077
1078         arch_spin_unlock(&trace_cmdline_lock);
1079         preempt_enable();
1080 }
1081
1082 void tracing_record_cmdline(struct task_struct *tsk)
1083 {
1084         if (atomic_read(&trace_record_cmdline_disabled) || !tracer_enabled ||
1085             !tracing_is_on())
1086                 return;
1087
1088         trace_save_cmdline(tsk);
1089 }
1090
1091 void
1092 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1093                              int pc)
1094 {
1095         struct task_struct *tsk = current;
1096
1097         entry->preempt_count            = pc & 0xff;
1098         entry->pid                      = (tsk) ? tsk->pid : 0;
1099         entry->lock_depth               = (tsk) ? tsk->lock_depth : 0;
1100         entry->flags =
1101 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
1102                 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
1103 #else
1104                 TRACE_FLAG_IRQS_NOSUPPORT |
1105 #endif
1106                 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1107                 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
1108                 (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
1109 }
1110 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
1111
1112 struct ring_buffer_event *
1113 trace_buffer_lock_reserve(struct ring_buffer *buffer,
1114                           int type,
1115                           unsigned long len,
1116                           unsigned long flags, int pc)
1117 {
1118         struct ring_buffer_event *event;
1119
1120         event = ring_buffer_lock_reserve(buffer, len);
1121         if (event != NULL) {
1122                 struct trace_entry *ent = ring_buffer_event_data(event);
1123
1124                 tracing_generic_entry_update(ent, flags, pc);
1125                 ent->type = type;
1126         }
1127
1128         return event;
1129 }
1130
1131 static inline void
1132 __trace_buffer_unlock_commit(struct ring_buffer *buffer,
1133                              struct ring_buffer_event *event,
1134                              unsigned long flags, int pc,
1135                              int wake)
1136 {
1137         ring_buffer_unlock_commit(buffer, event);
1138
1139         ftrace_trace_stack(buffer, flags, 6, pc);
1140         ftrace_trace_userstack(buffer, flags, pc);
1141
1142         if (wake)
1143                 trace_wake_up();
1144 }
1145
1146 void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1147                                 struct ring_buffer_event *event,
1148                                 unsigned long flags, int pc)
1149 {
1150         __trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
1151 }
1152
1153 struct ring_buffer_event *
1154 trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1155                                   int type, unsigned long len,
1156                                   unsigned long flags, int pc)
1157 {
1158         *current_rb = global_trace.buffer;
1159         return trace_buffer_lock_reserve(*current_rb,
1160                                          type, len, flags, pc);
1161 }
1162 EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
1163
1164 void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1165                                         struct ring_buffer_event *event,
1166                                         unsigned long flags, int pc)
1167 {
1168         __trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
1169 }
1170 EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
1171
1172 void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
1173                                        struct ring_buffer_event *event,
1174                                        unsigned long flags, int pc)
1175 {
1176         __trace_buffer_unlock_commit(buffer, event, flags, pc, 0);
1177 }
1178 EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);
1179
1180 void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1181                                          struct ring_buffer_event *event)
1182 {
1183         ring_buffer_discard_commit(buffer, event);
1184 }
1185 EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
1186
1187 void
1188 trace_function(struct trace_array *tr,
1189                unsigned long ip, unsigned long parent_ip, unsigned long flags,
1190                int pc)
1191 {
1192         struct ftrace_event_call *call = &event_function;
1193         struct ring_buffer *buffer = tr->buffer;
1194         struct ring_buffer_event *event;
1195         struct ftrace_entry *entry;
1196
1197         /* If we are reading the ring buffer, don't trace */
1198         if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
1199                 return;
1200
1201         event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
1202                                           flags, pc);
1203         if (!event)
1204                 return;
1205         entry   = ring_buffer_event_data(event);
1206         entry->ip                       = ip;
1207         entry->parent_ip                = parent_ip;
1208
1209         if (!filter_check_discard(call, entry, buffer, event))
1210                 ring_buffer_unlock_commit(buffer, event);
1211 }
1212
1213 void
1214 ftrace(struct trace_array *tr, struct trace_array_cpu *data,
1215        unsigned long ip, unsigned long parent_ip, unsigned long flags,
1216        int pc)
1217 {
1218         if (likely(!atomic_read(&data->disabled)))
1219                 trace_function(tr, ip, parent_ip, flags, pc);
1220 }
1221
1222 #ifdef CONFIG_STACKTRACE
1223 static void __ftrace_trace_stack(struct ring_buffer *buffer,
1224                                  unsigned long flags,
1225                                  int skip, int pc)
1226 {
1227         struct ftrace_event_call *call = &event_kernel_stack;
1228         struct ring_buffer_event *event;
1229         struct stack_entry *entry;
1230         struct stack_trace trace;
1231
1232         event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1233                                           sizeof(*entry), flags, pc);
1234         if (!event)
1235                 return;
1236         entry   = ring_buffer_event_data(event);
1237         memset(&entry->caller, 0, sizeof(entry->caller));
1238
1239         trace.nr_entries        = 0;
1240         trace.max_entries       = FTRACE_STACK_ENTRIES;
1241         trace.skip              = skip;
1242         trace.entries           = entry->caller;
1243
1244         save_stack_trace(&trace);
1245         if (!filter_check_discard(call, entry, buffer, event))
1246                 ring_buffer_unlock_commit(buffer, event);
1247 }
1248
1249 void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1250                         int skip, int pc)
1251 {
1252         if (!(trace_flags & TRACE_ITER_STACKTRACE))
1253                 return;
1254
1255         __ftrace_trace_stack(buffer, flags, skip, pc);
1256 }
1257
1258 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1259                    int pc)
1260 {
1261         __ftrace_trace_stack(tr->buffer, flags, skip, pc);
1262 }
1263
1264 /**
1265  * trace_dump_stack - record a stack back trace in the trace buffer
1266  */
1267 void trace_dump_stack(void)
1268 {
1269         unsigned long flags;
1270
1271         if (tracing_disabled || tracing_selftest_running)
1272                 return;
1273
1274         local_save_flags(flags);
1275
1276         /* skipping 3 traces seems to get us to the caller of this function */
1277         __ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count());
1278 }
1279
1280 void
1281 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1282 {
1283         struct ftrace_event_call *call = &event_user_stack;
1284         struct ring_buffer_event *event;
1285         struct userstack_entry *entry;
1286         struct stack_trace trace;
1287
1288         if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1289                 return;
1290
1291         /*
1292          * NMIs can not handle page faults, even with fixups.
1293          * Saving the user stack can (and often does) fault.
1294          */
1295         if (unlikely(in_nmi()))
1296                 return;
1297
1298         event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
1299                                           sizeof(*entry), flags, pc);
1300         if (!event)
1301                 return;
1302         entry   = ring_buffer_event_data(event);
1303
1304         entry->tgid             = current->tgid;
1305         memset(&entry->caller, 0, sizeof(entry->caller));
1306
1307         trace.nr_entries        = 0;
1308         trace.max_entries       = FTRACE_STACK_ENTRIES;
1309         trace.skip              = 0;
1310         trace.entries           = entry->caller;
1311
1312         save_stack_trace_user(&trace);
1313         if (!filter_check_discard(call, entry, buffer, event))
1314                 ring_buffer_unlock_commit(buffer, event);
1315 }
1316
1317 #ifdef UNUSED
1318 static void __trace_userstack(struct trace_array *tr, unsigned long flags)
1319 {
1320         ftrace_trace_userstack(tr, flags, preempt_count());
1321 }
1322 #endif /* UNUSED */
1323
1324 #endif /* CONFIG_STACKTRACE */
1325
1326 static void
1327 ftrace_trace_special(void *__tr,
1328                      unsigned long arg1, unsigned long arg2, unsigned long arg3,
1329                      int pc)
1330 {
1331         struct ftrace_event_call *call = &event_special;
1332         struct ring_buffer_event *event;
1333         struct trace_array *tr = __tr;
1334         struct ring_buffer *buffer = tr->buffer;
1335         struct special_entry *entry;
1336
1337         event = trace_buffer_lock_reserve(buffer, TRACE_SPECIAL,
1338                                           sizeof(*entry), 0, pc);
1339         if (!event)
1340                 return;
1341         entry   = ring_buffer_event_data(event);
1342         entry->arg1                     = arg1;
1343         entry->arg2                     = arg2;
1344         entry->arg3                     = arg3;
1345
1346         if (!filter_check_discard(call, entry, buffer, event))
1347                 trace_buffer_unlock_commit(buffer, event, 0, pc);
1348 }
1349
1350 void
1351 __trace_special(void *__tr, void *__data,
1352                 unsigned long arg1, unsigned long arg2, unsigned long arg3)
1353 {
1354         ftrace_trace_special(__tr, arg1, arg2, arg3, preempt_count());
1355 }
1356
1357 void
1358 ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
1359 {
1360         struct trace_array *tr = &global_trace;
1361         struct trace_array_cpu *data;
1362         unsigned long flags;
1363         int cpu;
1364         int pc;
1365
1366         if (tracing_disabled)
1367                 return;
1368
1369         pc = preempt_count();
1370         local_irq_save(flags);
1371         cpu = raw_smp_processor_id();
1372         data = tr->data[cpu];
1373
1374         if (likely(atomic_inc_return(&data->disabled) == 1))
1375                 ftrace_trace_special(tr, arg1, arg2, arg3, pc);
1376
1377         atomic_dec(&data->disabled);
1378         local_irq_restore(flags);
1379 }
1380
1381 /**
1382  * trace_vbprintk - write binary msg to tracing buffer
1383  *
1384  */
1385 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
1386 {
1387         static arch_spinlock_t trace_buf_lock =
1388                 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1389         static u32 trace_buf[TRACE_BUF_SIZE];
1390
1391         struct ftrace_event_call *call = &event_bprint;
1392         struct ring_buffer_event *event;
1393         struct ring_buffer *buffer;
1394         struct trace_array *tr = &global_trace;
1395         struct trace_array_cpu *data;
1396         struct bprint_entry *entry;
1397         unsigned long flags;
1398         int disable;
1399         int resched;
1400         int cpu, len = 0, size, pc;
1401
1402         if (unlikely(tracing_selftest_running || tracing_disabled))
1403                 return 0;
1404
1405         /* Don't pollute graph traces with trace_vprintk internals */
1406         pause_graph_tracing();
1407
1408         pc = preempt_count();
1409         resched = ftrace_preempt_disable();
1410         cpu = raw_smp_processor_id();
1411         data = tr->data[cpu];
1412
1413         disable = atomic_inc_return(&data->disabled);
1414         if (unlikely(disable != 1))
1415                 goto out;
1416
1417         /* Lockdep uses trace_printk for lock tracing */
1418         local_irq_save(flags);
1419         arch_spin_lock(&trace_buf_lock);
1420         len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);
1421
1422         if (len > TRACE_BUF_SIZE || len < 0)
1423                 goto out_unlock;
1424
1425         size = sizeof(*entry) + sizeof(u32) * len;
1426         buffer = tr->buffer;
1427         event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
1428                                           flags, pc);
1429         if (!event)
1430                 goto out_unlock;
1431         entry = ring_buffer_event_data(event);
1432         entry->ip                       = ip;
1433         entry->fmt                      = fmt;
1434
1435         memcpy(entry->buf, trace_buf, sizeof(u32) * len);
1436         if (!filter_check_discard(call, entry, buffer, event)) {
1437                 ring_buffer_unlock_commit(buffer, event);
1438                 ftrace_trace_stack(buffer, flags, 6, pc);
1439         }
1440
1441 out_unlock:
1442         arch_spin_unlock(&trace_buf_lock);
1443         local_irq_restore(flags);
1444
1445 out:
1446         atomic_dec_return(&data->disabled);
1447         ftrace_preempt_enable(resched);
1448         unpause_graph_tracing();
1449
1450         return len;
1451 }
1452 EXPORT_SYMBOL_GPL(trace_vbprintk);
1453
1454 int trace_array_printk(struct trace_array *tr,
1455                        unsigned long ip, const char *fmt, ...)
1456 {
1457         int ret;
1458         va_list ap;
1459
1460         if (!(trace_flags & TRACE_ITER_PRINTK))
1461                 return 0;
1462
1463         va_start(ap, fmt);
1464         ret = trace_array_vprintk(tr, ip, fmt, ap);
1465         va_end(ap);
1466         return ret;
1467 }
1468
1469 int trace_array_vprintk(struct trace_array *tr,
1470                         unsigned long ip, const char *fmt, va_list args)
1471 {
1472         static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1473         static char trace_buf[TRACE_BUF_SIZE];
1474
1475         struct ftrace_event_call *call = &event_print;
1476         struct ring_buffer_event *event;
1477         struct ring_buffer *buffer;
1478         struct trace_array_cpu *data;
1479         int cpu, len = 0, size, pc;
1480         struct print_entry *entry;
1481         unsigned long irq_flags;
1482         int disable;
1483
1484         if (tracing_disabled || tracing_selftest_running)
1485                 return 0;
1486
1487         pc = preempt_count();
1488         preempt_disable_notrace();
1489         cpu = raw_smp_processor_id();
1490         data = tr->data[cpu];
1491
1492         disable = atomic_inc_return(&data->disabled);
1493         if (unlikely(disable != 1))
1494                 goto out;
1495
1496         pause_graph_tracing();
1497         raw_local_irq_save(irq_flags);
1498         arch_spin_lock(&trace_buf_lock);
1499         len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
1500
1501         size = sizeof(*entry) + len + 1;
1502         buffer = tr->buffer;
1503         event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
1504                                           irq_flags, pc);
1505         if (!event)
1506                 goto out_unlock;
1507         entry = ring_buffer_event_data(event);
1508         entry->ip = ip;
1509
1510         memcpy(&entry->buf, trace_buf, len);
1511         entry->buf[len] = '\0';
1512         if (!filter_check_discard(call, entry, buffer, event)) {
1513                 ring_buffer_unlock_commit(buffer, event);
1514                 ftrace_trace_stack(buffer, irq_flags, 6, pc);
1515         }
1516
1517  out_unlock:
1518         arch_spin_unlock(&trace_buf_lock);
1519         raw_local_irq_restore(irq_flags);
1520         unpause_graph_tracing();
1521  out:
1522         atomic_dec_return(&data->disabled);
1523         preempt_enable_notrace();
1524
1525         return len;
1526 }
1527
1528 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
1529 {
1530         return trace_array_vprintk(&global_trace, ip, fmt, args);
1531 }
1532 EXPORT_SYMBOL_GPL(trace_vprintk);
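/*
 * Illustrative note: kernel code normally reaches trace_vbprintk()/
 * trace_vprintk() through the trace_printk() macro, e.g.
 *
 *	trace_printk("read %d bytes from cpu %d\n", len, cpu);
 *
 * which records the message in the ring buffer instead of the printk log
 * (the format string and arguments shown are only an example).
 */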
1533
1534 enum trace_file_type {
1535         TRACE_FILE_LAT_FMT      = 1,
1536         TRACE_FILE_ANNOTATE     = 2,
1537 };
1538
1539 static void trace_iterator_increment(struct trace_iterator *iter)
1540 {
1541         /* Don't allow ftrace to trace into the ring buffers */
1542         ftrace_disable_cpu();
1543
1544         iter->idx++;
1545         if (iter->buffer_iter[iter->cpu])
1546                 ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
1547
1548         ftrace_enable_cpu();
1549 }
1550
1551 static struct trace_entry *
1552 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
1553                 unsigned long *lost_events)
1554 {
1555         struct ring_buffer_event *event;
1556         struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];
1557
1558         /* Don't allow ftrace to trace into the ring buffers */
1559         ftrace_disable_cpu();
1560
1561         if (buf_iter)
1562                 event = ring_buffer_iter_peek(buf_iter, ts);
1563         else
1564                 event = ring_buffer_peek(iter->tr->buffer, cpu, ts,
1565                                          lost_events);
1566
1567         ftrace_enable_cpu();
1568
1569         return event ? ring_buffer_event_data(event) : NULL;
1570 }
1571
1572 static struct trace_entry *
1573 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
1574                   unsigned long *missing_events, u64 *ent_ts)
1575 {
1576         struct ring_buffer *buffer = iter->tr->buffer;
1577         struct trace_entry *ent, *next = NULL;
1578         unsigned long lost_events = 0, next_lost = 0;
1579         int cpu_file = iter->cpu_file;
1580         u64 next_ts = 0, ts;
1581         int next_cpu = -1;
1582         int cpu;
1583
1584         /*
1585          * If we are in a per_cpu trace file, don't bother iterating over
1586          * all cpus; peek at the requested one directly.
1587          */
1588         if (cpu_file > TRACE_PIPE_ALL_CPU) {
1589                 if (ring_buffer_empty_cpu(buffer, cpu_file))
1590                         return NULL;
1591                 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
1592                 if (ent_cpu)
1593                         *ent_cpu = cpu_file;
1594
1595                 return ent;
1596         }
1597
1598         for_each_tracing_cpu(cpu) {
1599
1600                 if (ring_buffer_empty_cpu(buffer, cpu))
1601                         continue;
1602
1603                 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
1604
1605                 /*
1606                  * Pick the entry with the smallest timestamp:
1607                  */
1608                 if (ent && (!next || ts < next_ts)) {
1609                         next = ent;
1610                         next_cpu = cpu;
1611                         next_ts = ts;
1612                         next_lost = lost_events;
1613                 }
1614         }
1615
1616         if (ent_cpu)
1617                 *ent_cpu = next_cpu;
1618
1619         if (ent_ts)
1620                 *ent_ts = next_ts;
1621
1622         if (missing_events)
1623                 *missing_events = next_lost;
1624
1625         return next;
1626 }
1627
1628 /* Find the next real entry, without updating the iterator itself */
1629 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
1630                                           int *ent_cpu, u64 *ent_ts)
1631 {
1632         return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
1633 }
1634
1635 /* Find the next real entry, and increment the iterator to the next entry */
1636 static void *find_next_entry_inc(struct trace_iterator *iter)
1637 {
1638         iter->ent = __find_next_entry(iter, &iter->cpu,
1639                                       &iter->lost_events, &iter->ts);
1640
1641         if (iter->ent)
1642                 trace_iterator_increment(iter);
1643
1644         return iter->ent ? iter : NULL;
1645 }
1646
1647 static void trace_consume(struct trace_iterator *iter)
1648 {
1649         /* Don't allow ftrace to trace into the ring buffers */
1650         ftrace_disable_cpu();
1651         ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts,
1652                             &iter->lost_events);
1653         ftrace_enable_cpu();
1654 }
1655
1656 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
1657 {
1658         struct trace_iterator *iter = m->private;
1659         int i = (int)*pos;
1660         void *ent;
1661
1662         WARN_ON_ONCE(iter->leftover);
1663
1664         (*pos)++;
1665
1666         /* can't go backwards */
1667         if (iter->idx > i)
1668                 return NULL;
1669
1670         if (iter->idx < 0)
1671                 ent = find_next_entry_inc(iter);
1672         else
1673                 ent = iter;
1674
1675         while (ent && iter->idx < i)
1676                 ent = find_next_entry_inc(iter);
1677
1678         iter->pos = *pos;
1679
1680         return ent;
1681 }
1682
1683 static void tracing_iter_reset(struct trace_iterator *iter, int cpu)
1684 {
1685         struct trace_array *tr = iter->tr;
1686         struct ring_buffer_event *event;
1687         struct ring_buffer_iter *buf_iter;
1688         unsigned long entries = 0;
1689         u64 ts;
1690
1691         tr->data[cpu]->skipped_entries = 0;
1692
1693         if (!iter->buffer_iter[cpu])
1694                 return;
1695
1696         buf_iter = iter->buffer_iter[cpu];
1697         ring_buffer_iter_reset(buf_iter);
1698
1699         /*
1700          * With the max latency tracers we could have the case
1701          * that a reset never took place on a cpu. This is evident
1702          * from the timestamps lying before the start of the buffer.
1703          */
1704         while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
1705                 if (ts >= iter->tr->time_start)
1706                         break;
1707                 entries++;
1708                 ring_buffer_read(buf_iter, NULL);
1709         }
1710
1711         tr->data[cpu]->skipped_entries = entries;
1712 }
1713
1714 /*
1715  * The current tracer is copied to avoid taking a global lock
1716  * all around.
1717  */
1718 static void *s_start(struct seq_file *m, loff_t *pos)
1719 {
1720         struct trace_iterator *iter = m->private;
1721         static struct tracer *old_tracer;
1722         int cpu_file = iter->cpu_file;
1723         void *p = NULL;
1724         loff_t l = 0;
1725         int cpu;
1726
1727         /* copy the tracer to avoid using a global lock all around */
1728         mutex_lock(&trace_types_lock);
1729         if (unlikely(old_tracer != current_trace && current_trace)) {
1730                 old_tracer = current_trace;
1731                 *iter->trace = *current_trace;
1732         }
1733         mutex_unlock(&trace_types_lock);
1734
1735         atomic_inc(&trace_record_cmdline_disabled);
1736
1737         if (*pos != iter->pos) {
1738                 iter->ent = NULL;
1739                 iter->cpu = 0;
1740                 iter->idx = -1;
1741
1742                 ftrace_disable_cpu();
1743
1744                 if (cpu_file == TRACE_PIPE_ALL_CPU) {
1745                         for_each_tracing_cpu(cpu)
1746                                 tracing_iter_reset(iter, cpu);
1747                 } else
1748                         tracing_iter_reset(iter, cpu_file);
1749
1750                 ftrace_enable_cpu();
1751
1752                 iter->leftover = 0;
1753                 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
1754                         ;
1755
1756         } else {
1757                 /*
1758                  * If we overflowed the seq_file before, then we want
1759                  * to just reuse the trace_seq buffer again.
1760                  */
1761                 if (iter->leftover)
1762                         p = iter;
1763                 else {
1764                         l = *pos - 1;
1765                         p = s_next(m, p, &l);
1766                 }
1767         }
1768
1769         trace_event_read_lock();
1770         trace_access_lock(cpu_file);
1771         return p;
1772 }
1773
1774 static void s_stop(struct seq_file *m, void *p)
1775 {
1776         struct trace_iterator *iter = m->private;
1777
1778         atomic_dec(&trace_record_cmdline_disabled);
1779         trace_access_unlock(iter->cpu_file);
1780         trace_event_read_unlock();
1781 }
1782
1783 static void print_lat_help_header(struct seq_file *m)
1784 {
1785         seq_puts(m, "#                  _------=> CPU#            \n");
1786         seq_puts(m, "#                 / _-----=> irqs-off        \n");
1787         seq_puts(m, "#                | / _----=> need-resched    \n");
1788         seq_puts(m, "#                || / _---=> hardirq/softirq \n");
1789         seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
1790         seq_puts(m, "#                |||| /_--=> lock-depth       \n");
1791         seq_puts(m, "#                |||||/     delay             \n");
1792         seq_puts(m, "#  cmd     pid   |||||| time  |   caller      \n");
1793         seq_puts(m, "#     \\   /      ||||||   \\   |   /           \n");
1794 }
1795
1796 static void print_func_help_header(struct seq_file *m)
1797 {
1798         seq_puts(m, "#           TASK-PID    CPU#    TIMESTAMP  FUNCTION\n");
1799         seq_puts(m, "#              | |       |          |         |\n");
1800 }
1801
1802
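/*
 * Print the "# <tracer> latency trace ..." banner used by the latency
 * format: entry counts, the preemption model, the task that hit the
 * latency and, if recorded, the critical section start/end.
 */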
1803 void
1804 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
1805 {
1806         unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
1807         struct trace_array *tr = iter->tr;
1808         struct trace_array_cpu *data = tr->data[tr->cpu];
1809         struct tracer *type = current_trace;
1810         unsigned long entries = 0;
1811         unsigned long total = 0;
1812         unsigned long count;
1813         const char *name = "preemption";
1814         int cpu;
1815
1816         if (type)
1817                 name = type->name;
1818
1819
1820         for_each_tracing_cpu(cpu) {
1821                 count = ring_buffer_entries_cpu(tr->buffer, cpu);
1822                 /*
1823                  * If this buffer has skipped entries, then we hold all
1824                  * entries for the trace and we need to ignore the
1825                  * ones before the time stamp.
1826                  */
1827                 if (tr->data[cpu]->skipped_entries) {
1828                         count -= tr->data[cpu]->skipped_entries;
1829                         /* total is the same as the entries */
1830                         total += count;
1831                 } else
1832                         total += count +
1833                                 ring_buffer_overrun_cpu(tr->buffer, cpu);
1834                 entries += count;
1835         }
1836
1837         seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
1838                    name, UTS_RELEASE);
1839         seq_puts(m, "# -----------------------------------"
1840                  "---------------------------------\n");
1841         seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
1842                    " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
1843                    nsecs_to_usecs(data->saved_latency),
1844                    entries,
1845                    total,
1846                    tr->cpu,
1847 #if defined(CONFIG_PREEMPT_NONE)
1848                    "server",
1849 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
1850                    "desktop",
1851 #elif defined(CONFIG_PREEMPT)
1852                    "preempt",
1853 #else
1854                    "unknown",
1855 #endif
1856                    /* These are reserved for later use */
1857                    0, 0, 0, 0);
1858 #ifdef CONFIG_SMP
1859         seq_printf(m, " #P:%d)\n", num_online_cpus());
1860 #else
1861         seq_puts(m, ")\n");
1862 #endif
1863         seq_puts(m, "#    -----------------\n");
1864         seq_printf(m, "#    | task: %.16s-%d "
1865                    "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
1866                    data->comm, data->pid, data->uid, data->nice,
1867                    data->policy, data->rt_priority);
1868         seq_puts(m, "#    -----------------\n");
1869
1870         if (data->critical_start) {
1871                 seq_puts(m, "#  => started at: ");
1872                 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
1873                 trace_print_seq(m, &iter->seq);
1874                 seq_puts(m, "\n#  => ended at:   ");
1875                 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
1876                 trace_print_seq(m, &iter->seq);
1877                 seq_puts(m, "\n#\n");
1878         }
1879
1880         seq_puts(m, "#\n");
1881 }
1882
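/*
 * When buffer annotation is enabled (i.e. the buffers overflowed), emit
 * a one-time "CPU %u buffer started" marker the first time the output
 * includes entries from a given per-cpu buffer.
 */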
1883 static void test_cpu_buff_start(struct trace_iterator *iter)
1884 {
1885         struct trace_seq *s = &iter->seq;
1886
1887         if (!(trace_flags & TRACE_ITER_ANNOTATE))
1888                 return;
1889
1890         if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
1891                 return;
1892
1893         if (cpumask_test_cpu(iter->cpu, iter->started))
1894                 return;
1895
1896         if (iter->tr->data[iter->cpu]->skipped_entries)
1897                 return;
1898
1899         cpumask_set_cpu(iter->cpu, iter->started);
1900
1901         /* Don't print the "buffer started" annotation for the first entry of the trace */
1902         if (iter->idx > 1)
1903                 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
1904                                 iter->cpu);
1905 }
1906
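/*
 * Default, human readable output format. The raw, hex and bin variants
 * below are selected through the corresponding trace_options flags.
 */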
1907 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
1908 {
1909         struct trace_seq *s = &iter->seq;
1910         unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
1911         struct trace_entry *entry;
1912         struct trace_event *event;
1913
1914         entry = iter->ent;
1915
1916         test_cpu_buff_start(iter);
1917
1918         event = ftrace_find_event(entry->type);
1919
1920         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
1921                 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
1922                         if (!trace_print_lat_context(iter))
1923                                 goto partial;
1924                 } else {
1925                         if (!trace_print_context(iter))
1926                                 goto partial;
1927                 }
1928         }
1929
1930         if (event)
1931                 return event->funcs->trace(iter, sym_flags, event);
1932
1933         if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
1934                 goto partial;
1935
1936         return TRACE_TYPE_HANDLED;
1937 partial:
1938         return TRACE_TYPE_PARTIAL_LINE;
1939 }
1940
1941 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
1942 {
1943         struct trace_seq *s = &iter->seq;
1944         struct trace_entry *entry;
1945         struct trace_event *event;
1946
1947         entry = iter->ent;
1948
1949         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
1950                 if (!trace_seq_printf(s, "%d %d %llu ",
1951                                       entry->pid, iter->cpu, iter->ts))
1952                         goto partial;
1953         }
1954
1955         event = ftrace_find_event(entry->type);
1956         if (event)
1957                 return event->funcs->raw(iter, 0, event);
1958
1959         if (!trace_seq_printf(s, "%d ?\n", entry->type))
1960                 goto partial;
1961
1962         return TRACE_TYPE_HANDLED;
1963 partial:
1964         return TRACE_TYPE_PARTIAL_LINE;
1965 }
1966
1967 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
1968 {
1969         struct trace_seq *s = &iter->seq;
1970         unsigned char newline = '\n';
1971         struct trace_entry *entry;
1972         struct trace_event *event;
1973
1974         entry = iter->ent;
1975
1976         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
1977                 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
1978                 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
1979                 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
1980         }
1981
1982         event = ftrace_find_event(entry->type);
1983         if (event) {
1984                 enum print_line_t ret = event->funcs->hex(iter, 0, event);
1985                 if (ret != TRACE_TYPE_HANDLED)
1986                         return ret;
1987         }
1988
1989         SEQ_PUT_FIELD_RET(s, newline);
1990
1991         return TRACE_TYPE_HANDLED;
1992 }
1993
1994 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
1995 {
1996         struct trace_seq *s = &iter->seq;
1997         struct trace_entry *entry;
1998         struct trace_event *event;
1999
2000         entry = iter->ent;
2001
2002         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2003                 SEQ_PUT_FIELD_RET(s, entry->pid);
2004                 SEQ_PUT_FIELD_RET(s, iter->cpu);
2005                 SEQ_PUT_FIELD_RET(s, iter->ts);
2006         }
2007
2008         event = ftrace_find_event(entry->type);
2009         return event ? event->funcs->binary(iter, 0, event) :
2010                 TRACE_TYPE_HANDLED;
2011 }
2012
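/*
 * Return 1 if there is nothing left to read, honoring a single-CPU
 * selection in iter->cpu_file.
 */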
2013 int trace_empty(struct trace_iterator *iter)
2014 {
2015         int cpu;
2016
2017         /* If we are looking at one CPU buffer, only check that one */
2018         if (iter->cpu_file != TRACE_PIPE_ALL_CPU) {
2019                 cpu = iter->cpu_file;
2020                 if (iter->buffer_iter[cpu]) {
2021                         if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
2022                                 return 0;
2023                 } else {
2024                         if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
2025                                 return 0;
2026                 }
2027                 return 1;
2028         }
2029
2030         for_each_tracing_cpu(cpu) {
2031                 if (iter->buffer_iter[cpu]) {
2032                         if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
2033                                 return 0;
2034                 } else {
2035                         if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
2036                                 return 0;
2037                 }
2038         }
2039
2040         return 1;
2041 }
2042
2043 /*  Called with trace_event_read_lock() held. */
2044 static enum print_line_t print_trace_line(struct trace_iterator *iter)
2045 {
2046         enum print_line_t ret;
2047
2048         if (iter->lost_events)
2049                 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2050                                  iter->cpu, iter->lost_events);
2051
2052         if (iter->trace && iter->trace->print_line) {
2053                 ret = iter->trace->print_line(iter);
2054                 if (ret != TRACE_TYPE_UNHANDLED)
2055                         return ret;
2056         }
2057
2058         if (iter->ent->type == TRACE_BPRINT &&
2059                         trace_flags & TRACE_ITER_PRINTK &&
2060                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2061                 return trace_print_bprintk_msg_only(iter);
2062
2063         if (iter->ent->type == TRACE_PRINT &&
2064                         trace_flags & TRACE_ITER_PRINTK &&
2065                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2066                 return trace_print_printk_msg_only(iter);
2067
2068         if (trace_flags & TRACE_ITER_BIN)
2069                 return print_bin_fmt(iter);
2070
2071         if (trace_flags & TRACE_ITER_HEX)
2072                 return print_hex_fmt(iter);
2073
2074         if (trace_flags & TRACE_ITER_RAW)
2075                 return print_raw_fmt(iter);
2076
2077         return print_trace_fmt(iter);
2078 }
2079
2080 void trace_default_header(struct seq_file *m)
2081 {
2082         struct trace_iterator *iter = m->private;
2083
2084         if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2085                 /* print nothing if the buffers are empty */
2086                 if (trace_empty(iter))
2087                         return;
2088                 print_trace_header(m, iter);
2089                 if (!(trace_flags & TRACE_ITER_VERBOSE))
2090                         print_lat_help_header(m);
2091         } else {
2092                 if (!(trace_flags & TRACE_ITER_VERBOSE))
2093                         print_func_help_header(m);
2094         }
2095 }
2096
2097 static int s_show(struct seq_file *m, void *v)
2098 {
2099         struct trace_iterator *iter = v;
2100         int ret;
2101
2102         if (iter->ent == NULL) {
2103                 if (iter->tr) {
2104                         seq_printf(m, "# tracer: %s\n", iter->trace->name);
2105                         seq_puts(m, "#\n");
2106                 }
2107                 if (iter->trace && iter->trace->print_header)
2108                         iter->trace->print_header(m);
2109                 else
2110                         trace_default_header(m);
2111
2112         } else if (iter->leftover) {
2113                 /*
2114                  * If we filled the seq_file buffer earlier, we
2115                  * want to just show it now.
2116                  */
2117                 ret = trace_print_seq(m, &iter->seq);
2118
2119                 /* ret should this time be zero, but you never know */
2120                 iter->leftover = ret;
2121
2122         } else {
2123                 print_trace_line(iter);
2124                 ret = trace_print_seq(m, &iter->seq);
2125                 /*
2126                  * If we overflow the seq_file buffer, then it will
2127                  * ask us for this data again at start up.
2128                  * Use that instead.
2129                  *  ret is 0 if seq_file write succeeded.
2130                  *        -1 otherwise.
2131                  */
2132                 iter->leftover = ret;
2133         }
2134
2135         return 0;
2136 }
2137
2138 static const struct seq_operations tracer_seq_ops = {
2139         .start          = s_start,
2140         .next           = s_next,
2141         .stop           = s_stop,
2142         .show           = s_show,
2143 };
2144
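/*
 * Open helper for the trace file: allocate the iterator, copy the
 * current tracer so it cannot change underneath us, stop tracing while
 * the snapshot is read, and set up the per-cpu ring buffer iterators.
 */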
2145 static struct trace_iterator *
2146 __tracing_open(struct inode *inode, struct file *file)
2147 {
2148         long cpu_file = (long) inode->i_private;
2149         void *fail_ret = ERR_PTR(-ENOMEM);
2150         struct trace_iterator *iter;
2151         struct seq_file *m;
2152         int cpu, ret;
2153
2154         if (tracing_disabled)
2155                 return ERR_PTR(-ENODEV);
2156
2157         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2158         if (!iter)
2159                 return ERR_PTR(-ENOMEM);
2160
2161         /*
2162          * We make a copy of the current tracer to avoid concurrent
2163          * changes on it while we are reading.
2164          */
2165         mutex_lock(&trace_types_lock);
2166         iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
2167         if (!iter->trace)
2168                 goto fail;
2169
2170         if (current_trace)
2171                 *iter->trace = *current_trace;
2172
2173         if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
2174                 goto fail;
2175
2176         if (current_trace && current_trace->print_max)
2177                 iter->tr = &max_tr;
2178         else
2179                 iter->tr = &global_trace;
2180         iter->pos = -1;
2181         mutex_init(&iter->mutex);
2182         iter->cpu_file = cpu_file;
2183
2184         /* Notify the tracer early; before we stop tracing. */
2185         if (iter->trace && iter->trace->open)
2186                 iter->trace->open(iter);
2187
2188         /* Annotate start of buffers if we had overruns */
2189         if (ring_buffer_overruns(iter->tr->buffer))
2190                 iter->iter_flags |= TRACE_FILE_ANNOTATE;
2191
2192         /* stop the trace while dumping */
2193         tracing_stop();
2194
2195         if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
2196                 for_each_tracing_cpu(cpu) {
2197                         iter->buffer_iter[cpu] =
2198                                 ring_buffer_read_prepare(iter->tr->buffer, cpu);
2199                 }
2200                 ring_buffer_read_prepare_sync();
2201                 for_each_tracing_cpu(cpu) {
2202                         ring_buffer_read_start(iter->buffer_iter[cpu]);
2203                         tracing_iter_reset(iter, cpu);
2204                 }
2205         } else {
2206                 cpu = iter->cpu_file;
2207                 iter->buffer_iter[cpu] =
2208                         ring_buffer_read_prepare(iter->tr->buffer, cpu);
2209                 ring_buffer_read_prepare_sync();
2210                 ring_buffer_read_start(iter->buffer_iter[cpu]);
2211                 tracing_iter_reset(iter, cpu);
2212         }
2213
2214         ret = seq_open(file, &tracer_seq_ops);
2215         if (ret < 0) {
2216                 fail_ret = ERR_PTR(ret);
2217                 goto fail_buffer;
2218         }
2219
2220         m = file->private_data;
2221         m->private = iter;
2222
2223         mutex_unlock(&trace_types_lock);
2224
2225         return iter;
2226
2227  fail_buffer:
2228         for_each_tracing_cpu(cpu) {
2229                 if (iter->buffer_iter[cpu])
2230                         ring_buffer_read_finish(iter->buffer_iter[cpu]);
2231         }
2232         free_cpumask_var(iter->started);
2233         tracing_start();
2234  fail:
2235         mutex_unlock(&trace_types_lock);
2236         kfree(iter->trace);
2237         kfree(iter);
2238
2239         return fail_ret;
2240 }
2241
2242 int tracing_open_generic(struct inode *inode, struct file *filp)
2243 {
2244         if (tracing_disabled)
2245                 return -ENODEV;
2246
2247         filp->private_data = inode->i_private;
2248         return 0;
2249 }
2250
2251 static int tracing_release(struct inode *inode, struct file *file)
2252 {
2253         struct seq_file *m = (struct seq_file *)file->private_data;
2254         struct trace_iterator *iter;
2255         int cpu;
2256
2257         if (!(file->f_mode & FMODE_READ))
2258                 return 0;
2259
2260         iter = m->private;
2261
2262         mutex_lock(&trace_types_lock);
2263         for_each_tracing_cpu(cpu) {
2264                 if (iter->buffer_iter[cpu])
2265                         ring_buffer_read_finish(iter->buffer_iter[cpu]);
2266         }
2267
2268         if (iter->trace && iter->trace->close)
2269                 iter->trace->close(iter);
2270
2271         /* reenable tracing if it was previously enabled */
2272         tracing_start();
2273         mutex_unlock(&trace_types_lock);
2274
2275         seq_release(inode, file);
2276         mutex_destroy(&iter->mutex);
2277         free_cpumask_var(iter->started);
2278         kfree(iter->trace);
2279         kfree(iter);
2280         return 0;
2281 }
2282
2283 static int tracing_open(struct inode *inode, struct file *file)
2284 {
2285         struct trace_iterator *iter;
2286         int ret = 0;
2287
2288         /* If this file was open for write, then erase contents */
2289         if ((file->f_mode & FMODE_WRITE) &&
2290             (file->f_flags & O_TRUNC)) {
2291                 long cpu = (long) inode->i_private;
2292
2293                 if (cpu == TRACE_PIPE_ALL_CPU)
2294                         tracing_reset_online_cpus(&global_trace);
2295                 else
2296                         tracing_reset(&global_trace, cpu);
2297         }
2298
2299         if (file->f_mode & FMODE_READ) {
2300                 iter = __tracing_open(inode, file);
2301                 if (IS_ERR(iter))
2302                         ret = PTR_ERR(iter);
2303                 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
2304                         iter->iter_flags |= TRACE_FILE_LAT_FMT;
2305         }
2306         return ret;
2307 }
2308
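/*
 * seq_file iteration over the list of registered tracers; this is what
 * the available_tracers file shows.
 */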
2309 static void *
2310 t_next(struct seq_file *m, void *v, loff_t *pos)
2311 {
2312         struct tracer *t = v;
2313
2314         (*pos)++;
2315
2316         if (t)
2317                 t = t->next;
2318
2319         return t;
2320 }
2321
2322 static void *t_start(struct seq_file *m, loff_t *pos)
2323 {
2324         struct tracer *t;
2325         loff_t l = 0;
2326
2327         mutex_lock(&trace_types_lock);
2328         for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
2329                 ;
2330
2331         return t;
2332 }
2333
2334 static void t_stop(struct seq_file *m, void *p)
2335 {
2336         mutex_unlock(&trace_types_lock);
2337 }
2338
2339 static int t_show(struct seq_file *m, void *v)
2340 {
2341         struct tracer *t = v;
2342
2343         if (!t)
2344                 return 0;
2345
2346         seq_printf(m, "%s", t->name);
2347         if (t->next)
2348                 seq_putc(m, ' ');
2349         else
2350                 seq_putc(m, '\n');
2351
2352         return 0;
2353 }
2354
2355 static const struct seq_operations show_traces_seq_ops = {
2356         .start          = t_start,
2357         .next           = t_next,
2358         .stop           = t_stop,
2359         .show           = t_show,
2360 };
2361
2362 static int show_traces_open(struct inode *inode, struct file *file)
2363 {
2364         if (tracing_disabled)
2365                 return -ENODEV;
2366
2367         return seq_open(file, &show_traces_seq_ops);
2368 }
2369
2370 static ssize_t
2371 tracing_write_stub(struct file *filp, const char __user *ubuf,
2372                    size_t count, loff_t *ppos)
2373 {
2374         return count;
2375 }
2376
2377 static const struct file_operations tracing_fops = {
2378         .open           = tracing_open,
2379         .read           = seq_read,
2380         .write          = tracing_write_stub,
2381         .llseek         = seq_lseek,
2382         .release        = tracing_release,
2383 };
2384
2385 static const struct file_operations show_traces_fops = {
2386         .open           = show_traces_open,
2387         .read           = seq_read,
2388         .release        = seq_release,
2389 };
2390
2391 /*
2392  * Only trace on a CPU if the bitmask is set:
2393  */
2394 static cpumask_var_t tracing_cpumask;
2395
2396 /*
2397  * The tracer itself will not take this lock, but still we want
2398  * to provide a consistent cpumask to user-space:
2399  */
2400 static DEFINE_MUTEX(tracing_cpumask_update_lock);
2401
2402 /*
2403  * Temporary storage for the character representation of the
2404  * CPU bitmask (and one more byte for the newline):
2405  */
2406 static char mask_str[NR_CPUS + 1];
2407
2408 static ssize_t
2409 tracing_cpumask_read(struct file *filp, char __user *ubuf,
2410                      size_t count, loff_t *ppos)
2411 {
2412         int len;
2413
2414         mutex_lock(&tracing_cpumask_update_lock);
2415
2416         len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
2417         if (count - len < 2) {
2418                 count = -EINVAL;
2419                 goto out_err;
2420         }
2421         len += sprintf(mask_str + len, "\n");
2422         count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
2423
2424 out_err:
2425         mutex_unlock(&tracing_cpumask_update_lock);
2426
2427         return count;
2428 }
2429
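/*
 * Update which CPUs are traced. The mask is parsed in the usual hex
 * cpumask format; for example "echo 3 > tracing_cpumask" would limit
 * tracing to CPUs 0 and 1.
 */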
2430 static ssize_t
2431 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
2432                       size_t count, loff_t *ppos)
2433 {
2434         int err, cpu;
2435         cpumask_var_t tracing_cpumask_new;
2436
2437         if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
2438                 return -ENOMEM;
2439
2440         err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
2441         if (err)
2442                 goto err_unlock;
2443
2444         mutex_lock(&tracing_cpumask_update_lock);
2445
2446         local_irq_disable();
2447         arch_spin_lock(&ftrace_max_lock);
2448         for_each_tracing_cpu(cpu) {
2449                 /*
2450                  * Increase/decrease the disabled counter if we are
2451                  * about to flip a bit in the cpumask:
2452                  */
2453                 if (cpumask_test_cpu(cpu, tracing_cpumask) &&
2454                                 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
2455                         atomic_inc(&global_trace.data[cpu]->disabled);
2456                 }
2457                 if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
2458                                 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
2459                         atomic_dec(&global_trace.data[cpu]->disabled);
2460                 }
2461         }
2462         arch_spin_unlock(&ftrace_max_lock);
2463         local_irq_enable();
2464
2465         cpumask_copy(tracing_cpumask, tracing_cpumask_new);
2466
2467         mutex_unlock(&tracing_cpumask_update_lock);
2468         free_cpumask_var(tracing_cpumask_new);
2469
2470         return count;
2471
2472 err_unlock:
2473         free_cpumask_var(tracing_cpumask_new);
2474
2475         return err;
2476 }
2477
2478 static const struct file_operations tracing_cpumask_fops = {
2479         .open           = tracing_open_generic,
2480         .read           = tracing_cpumask_read,
2481         .write          = tracing_cpumask_write,
2482 };
2483
2484 static int tracing_trace_options_show(struct seq_file *m, void *v)
2485 {
2486         struct tracer_opt *trace_opts;
2487         u32 tracer_flags;
2488         int i;
2489
2490         mutex_lock(&trace_types_lock);
2491         tracer_flags = current_trace->flags->val;
2492         trace_opts = current_trace->flags->opts;
2493
2494         for (i = 0; trace_options[i]; i++) {
2495                 if (trace_flags & (1 << i))
2496                         seq_printf(m, "%s\n", trace_options[i]);
2497                 else
2498                         seq_printf(m, "no%s\n", trace_options[i]);
2499         }
2500
2501         for (i = 0; trace_opts[i].name; i++) {
2502                 if (tracer_flags & trace_opts[i].bit)
2503                         seq_printf(m, "%s\n", trace_opts[i].name);
2504                 else
2505                         seq_printf(m, "no%s\n", trace_opts[i].name);
2506         }
2507         mutex_unlock(&trace_types_lock);
2508
2509         return 0;
2510 }
2511
2512 static int __set_tracer_option(struct tracer *trace,
2513                                struct tracer_flags *tracer_flags,
2514                                struct tracer_opt *opts, int neg)
2515 {
2516         int ret;
2517
2518         ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
2519         if (ret)
2520                 return ret;
2521
2522         if (neg)
2523                 tracer_flags->val &= ~opts->bit;
2524         else
2525                 tracer_flags->val |= opts->bit;
2526         return 0;
2527 }
2528
2529 /* Try to assign a tracer specific option */
2530 static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
2531 {
2532         struct tracer_flags *tracer_flags = trace->flags;
2533         struct tracer_opt *opts = NULL;
2534         int i;
2535
2536         for (i = 0; tracer_flags->opts[i].name; i++) {
2537                 opts = &tracer_flags->opts[i];
2538
2539                 if (strcmp(cmp, opts->name) == 0)
2540                         return __set_tracer_option(trace, trace->flags,
2541                                                    opts, neg);
2542         }
2543
2544         return -EINVAL;
2545 }
2546
2547 static void set_tracer_flags(unsigned int mask, int enabled)
2548 {
2549         /* do nothing if flag is already set */
2550         if (!!(trace_flags & mask) == !!enabled)
2551                 return;
2552
2553         if (enabled)
2554                 trace_flags |= mask;
2555         else
2556                 trace_flags &= ~mask;
2557 }
2558
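/*
 * Set or clear a trace option by name: writing "option" enables it,
 * "nooption" disables it (e.g. "echo noprint-parent > trace_options").
 * The generic options are tried first, then the current tracer's own.
 */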
2559 static ssize_t
2560 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
2561                         size_t cnt, loff_t *ppos)
2562 {
2563         char buf[64];
2564         char *cmp;
2565         int neg = 0;
2566         int ret;
2567         int i;
2568
2569         if (cnt >= sizeof(buf))
2570                 return -EINVAL;
2571
2572         if (copy_from_user(&buf, ubuf, cnt))
2573                 return -EFAULT;
2574
2575         buf[cnt] = 0;
2576         cmp = strstrip(buf);
2577
2578         if (strncmp(cmp, "no", 2) == 0) {
2579                 neg = 1;
2580                 cmp += 2;
2581         }
2582
2583         for (i = 0; trace_options[i]; i++) {
2584                 if (strcmp(cmp, trace_options[i]) == 0) {
2585                         set_tracer_flags(1 << i, !neg);
2586                         break;
2587                 }
2588         }
2589
2590         /* If no option could be set, test the specific tracer options */
2591         if (!trace_options[i]) {
2592                 mutex_lock(&trace_types_lock);
2593                 ret = set_tracer_option(current_trace, cmp, neg);
2594                 mutex_unlock(&trace_types_lock);
2595                 if (ret)
2596                         return ret;
2597         }
2598
2599         *ppos += cnt;
2600
2601         return cnt;
2602 }
2603
2604 static int tracing_trace_options_open(struct inode *inode, struct file *file)
2605 {
2606         if (tracing_disabled)
2607                 return -ENODEV;
2608         return single_open(file, tracing_trace_options_show, NULL);
2609 }
2610
2611 static const struct file_operations tracing_iter_fops = {
2612         .open           = tracing_trace_options_open,
2613         .read           = seq_read,
2614         .llseek         = seq_lseek,
2615         .release        = single_release,
2616         .write          = tracing_trace_options_write,
2617 };
2618
2619 static const char readme_msg[] =
2620         "tracing mini-HOWTO:\n\n"
2621         "# mount -t debugfs nodev /sys/kernel/debug\n\n"
2622         "# cat /sys/kernel/debug/tracing/available_tracers\n"
2623         "wakeup preemptirqsoff preemptoff irqsoff function sched_switch nop\n\n"
2624         "# cat /sys/kernel/debug/tracing/current_tracer\n"
2625         "nop\n"
2626         "# echo sched_switch > /sys/kernel/debug/tracing/current_tracer\n"
2627         "# cat /sys/kernel/debug/tracing/current_tracer\n"
2628         "sched_switch\n"
2629         "# cat /sys/kernel/debug/tracing/trace_options\n"
2630         "noprint-parent nosym-offset nosym-addr noverbose\n"
2631         "# echo print-parent > /sys/kernel/debug/tracing/trace_options\n"
2632         "# echo 1 > /sys/kernel/debug/tracing/tracing_enabled\n"
2633         "# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n"
2634         "# echo 0 > /sys/kernel/debug/tracing/tracing_enabled\n"
2635 ;
2636
2637 static ssize_t
2638 tracing_readme_read(struct file *filp, char __user *ubuf,
2639                        size_t cnt, loff_t *ppos)
2640 {
2641         return simple_read_from_buffer(ubuf, cnt, ppos,
2642                                         readme_msg, strlen(readme_msg));
2643 }
2644
2645 static const struct file_operations tracing_readme_fops = {
2646         .open           = tracing_open_generic,
2647         .read           = tracing_readme_read,
2648 };
2649
2650 static ssize_t
2651 tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
2652                                 size_t cnt, loff_t *ppos)
2653 {
2654         char *buf_comm;
2655         char *file_buf;
2656         char *buf;
2657         int len = 0;
2658         int pid;
2659         int i;
2660
2661         file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
2662         if (!file_buf)
2663                 return -ENOMEM;
2664
2665         buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
2666         if (!buf_comm) {
2667                 kfree(file_buf);
2668                 return -ENOMEM;
2669         }
2670
2671         buf = file_buf;
2672
2673         for (i = 0; i < SAVED_CMDLINES; i++) {
2674                 int r;
2675
2676                 pid = map_cmdline_to_pid[i];
2677                 if (pid == -1 || pid == NO_CMDLINE_MAP)
2678                         continue;
2679
2680                 trace_find_cmdline(pid, buf_comm);
2681                 r = sprintf(buf, "%d %s\n", pid, buf_comm);
2682                 buf += r;
2683                 len += r;
2684         }
2685
2686         len = simple_read_from_buffer(ubuf, cnt, ppos,
2687                                       file_buf, len);
2688
2689         kfree(file_buf);
2690         kfree(buf_comm);
2691
2692         return len;
2693 }
2694
2695 static const struct file_operations tracing_saved_cmdlines_fops = {
2696         .open           = tracing_open_generic,
2697         .read           = tracing_saved_cmdlines_read,
2698 };
2699
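/*
 * Handlers for the tracing_enabled control file: reads return 0 or 1,
 * and writes start or stop tracing, calling the current tracer's
 * start/stop hooks as well.
 */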
2700 static ssize_t
2701 tracing_ctrl_read(struct file *filp, char __user *ubuf,
2702                   size_t cnt, loff_t *ppos)
2703 {
2704         char buf[64];
2705         int r;
2706
2707         r = sprintf(buf, "%u\n", tracer_enabled);
2708         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2709 }
2710
2711 static ssize_t
2712 tracing_ctrl_write(struct file *filp, const char __user *ubuf,
2713                    size_t cnt, loff_t *ppos)
2714 {
2715         struct trace_array *tr = filp->private_data;
2716         char buf[64];
2717         unsigned long val;
2718         int ret;
2719
2720         if (cnt >= sizeof(buf))
2721                 return -EINVAL;
2722
2723         if (copy_from_user(&buf, ubuf, cnt))
2724                 return -EFAULT;
2725
2726         buf[cnt] = 0;
2727
2728         ret = strict_strtoul(buf, 10, &val);
2729         if (ret < 0)
2730                 return ret;
2731
2732         val = !!val;
2733
2734         mutex_lock(&trace_types_lock);
2735         if (tracer_enabled ^ val) {
2736                 if (val) {
2737                         tracer_enabled = 1;
2738                         if (current_trace->start)
2739                                 current_trace->start(tr);
2740                         tracing_start();
2741                 } else {
2742                         tracer_enabled = 0;
2743                         tracing_stop();
2744                         if (current_trace->stop)
2745                                 current_trace->stop(tr);
2746                 }
2747         }
2748         mutex_unlock(&trace_types_lock);
2749
2750         *ppos += cnt;
2751
2752         return cnt;
2753 }
2754
2755 static ssize_t
2756 tracing_set_trace_read(struct file *filp, char __user *ubuf,
2757                        size_t cnt, loff_t *ppos)
2758 {
2759         char buf[MAX_TRACER_SIZE+2];
2760         int r;
2761
2762         mutex_lock(&trace_types_lock);
2763         if (current_trace)
2764                 r = sprintf(buf, "%s\n", current_trace->name);
2765         else
2766                 r = sprintf(buf, "\n");
2767         mutex_unlock(&trace_types_lock);
2768
2769         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2770 }
2771
2772 int tracer_init(struct tracer *t, struct trace_array *tr)
2773 {
2774         tracing_reset_online_cpus(tr);
2775         return t->init(tr);
2776 }
2777
2778 static int tracing_resize_ring_buffer(unsigned long size)
2779 {
2780         int ret;
2781
2782         /*
2783          * If the kernel or the user changes the size of the ring buffer,
2784          * we use the size that was given, and we can forget about
2785          * expanding it later.
2786          */
2787         ring_buffer_expanded = 1;
2788
2789         ret = ring_buffer_resize(global_trace.buffer, size);
2790         if (ret < 0)
2791                 return ret;
2792
2793         ret = ring_buffer_resize(max_tr.buffer, size);
2794         if (ret < 0) {
2795                 int r;
2796
2797                 r = ring_buffer_resize(global_trace.buffer,
2798                                        global_trace.entries);
2799                 if (r < 0) {
2800                         /*
2801                          * AARGH! We are left with different
2802                          * size max buffer!!!!
2803                          * The max buffer is our "snapshot" buffer.
2804                          * When a tracer needs a snapshot (one of the
2805                          * latency tracers), it swaps the max buffer
2806                          * with the saved snapshot. We succeeded in updating
2807                          * the size of the main buffer, but failed to
2808                          * update the size of the max buffer. But when we tried
2809                          * to reset the main buffer to the original size, we
2810                          * failed there too. This is very unlikely to
2811                          * happen, but if it does, warn and kill all
2812                          * tracing.
2813                          */
2814                         WARN_ON(1);
2815                         tracing_disabled = 1;
2816                 }
2817                 return ret;
2818         }
2819
2820         global_trace.entries = size;
2821
2822         return ret;
2823 }
2824
2825 /**
2826  * tracing_update_buffers - used by tracing facility to expand ring buffers
2827  *
2828  * To save memory when tracing is configured in but never used, the
2829  * ring buffers are set to a minimum size. But once a user starts to
2830  * use the tracing facility, the buffers need to grow to their
2831  * default size.
2832  *
2833  * This function is to be called when a tracer is about to be used.
2834  */
2835 int tracing_update_buffers(void)
2836 {
2837         int ret = 0;
2838
2839         mutex_lock(&trace_types_lock);
2840         if (!ring_buffer_expanded)
2841                 ret = tracing_resize_ring_buffer(trace_buf_size);
2842         mutex_unlock(&trace_types_lock);
2843
2844         return ret;
2845 }
2846
2847 struct trace_option_dentry;
2848
2849 static struct trace_option_dentry *
2850 create_trace_option_files(struct tracer *tracer);
2851
2852 static void
2853 destroy_trace_option_files(struct trace_option_dentry *topts);
2854
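/*
 * Switch the current tracer to the one named @buf: make sure the ring
 * buffer is expanded, reset the old tracer, replace its tracer-specific
 * option files and initialize the new tracer.
 */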
2855 static int tracing_set_tracer(const char *buf)
2856 {
2857         static struct trace_option_dentry *topts;
2858         struct trace_array *tr = &global_trace;
2859         struct tracer *t;
2860         int ret = 0;
2861
2862         mutex_lock(&trace_types_lock);
2863
2864         if (!ring_buffer_expanded) {
2865                 ret = tracing_resize_ring_buffer(trace_buf_size);
2866                 if (ret < 0)
2867                         goto out;
2868                 ret = 0;
2869         }
2870
2871         for (t = trace_types; t; t = t->next) {
2872                 if (strcmp(t->name, buf) == 0)
2873                         break;
2874         }
2875         if (!t) {
2876                 ret = -EINVAL;
2877                 goto out;
2878         }
2879         if (t == current_trace)
2880                 goto out;
2881
2882         trace_branch_disable();
2883         if (current_trace && current_trace->reset)
2884                 current_trace->reset(tr);
2885
2886         destroy_trace_option_files(topts);
2887
2888         current_trace = t;
2889
2890         topts = create_trace_option_files(current_trace);
2891
2892         if (t->init) {
2893                 ret = tracer_init(t, tr);
2894                 if (ret)
2895                         goto out;
2896         }
2897
2898         trace_branch_enable(tr);
2899  out:
2900         mutex_unlock(&trace_types_lock);
2901
2902         return ret;
2903 }
2904
2905 static ssize_t
2906 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
2907                         size_t cnt, loff_t *ppos)
2908 {
2909         char buf[MAX_TRACER_SIZE+1];
2910         int i;
2911         size_t ret;
2912         int err;
2913
2914         ret = cnt;
2915
2916         if (cnt > MAX_TRACER_SIZE)
2917                 cnt = MAX_TRACER_SIZE;
2918
2919         if (copy_from_user(&buf, ubuf, cnt))
2920                 return -EFAULT;
2921
2922         buf[cnt] = 0;
2923
2924         /* strip trailing whitespace. */
2925         for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
2926                 buf[i] = 0;
2927
2928         err = tracing_set_tracer(buf);
2929         if (err)
2930                 return err;
2931
2932         *ppos += ret;
2933
2934         return ret;
2935 }
2936
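/*
 * The max latency value is kept in nanoseconds internally but exposed
 * in microseconds: hence nsecs_to_usecs() on read and the "* 1000" on
 * write.
 */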
2937 static ssize_t
2938 tracing_max_lat_read(struct file *filp, char __user *ubuf,
2939                      size_t cnt, loff_t *ppos)
2940 {
2941         unsigned long *ptr = filp->private_data;
2942         char buf[64];
2943         int r;
2944
2945         r = snprintf(buf, sizeof(buf), "%ld\n",
2946                      *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
2947         if (r > sizeof(buf))
2948                 r = sizeof(buf);
2949         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2950 }
2951
2952 static ssize_t
2953 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
2954                       size_t cnt, loff_t *ppos)
2955 {
2956         unsigned long *ptr = filp->private_data;
2957         char buf[64];
2958         unsigned long val;
2959         int ret;
2960
2961         if (cnt >= sizeof(buf))
2962                 return -EINVAL;
2963
2964         if (copy_from_user(&buf, ubuf, cnt))
2965                 return -EFAULT;
2966
2967         buf[cnt] = 0;
2968
2969         ret = strict_strtoul(buf, 10, &val);
2970         if (ret < 0)
2971                 return ret;
2972
2973         *ptr = val * 1000;
2974
2975         return cnt;
2976 }
2977
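/*
 * Open handler for trace_pipe, a consuming reader: unlike the trace
 * file, tracing keeps running and entries are removed from the ring
 * buffer as they are read.
 */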
2978 static int tracing_open_pipe(struct inode *inode, struct file *filp)
2979 {
2980         long cpu_file = (long) inode->i_private;
2981         struct trace_iterator *iter;
2982         int ret = 0;
2983
2984         if (tracing_disabled)
2985                 return -ENODEV;
2986
2987         mutex_lock(&trace_types_lock);
2988
2989         /* create a buffer to store the information to pass to userspace */
2990         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2991         if (!iter) {
2992                 ret = -ENOMEM;
2993                 goto out;
2994         }
2995
2996         /*
2997          * We make a copy of the current tracer to avoid concurrent
2998          * changes on it while we are reading.
2999          */
3000         iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
3001         if (!iter->trace) {
3002                 ret = -ENOMEM;
3003                 goto fail;
3004         }
3005         if (current_trace)
3006                 *iter->trace = *current_trace;
3007
3008         if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
3009                 ret = -ENOMEM;
3010                 goto fail;
3011         }
3012
3013         /* trace pipe does not show start of buffer */
3014         cpumask_setall(iter->started);
3015
3016         if (trace_flags & TRACE_ITER_LATENCY_FMT)
3017                 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3018
3019         iter->cpu_file = cpu_file;
3020         iter->tr = &global_trace;
3021         mutex_init(&iter->mutex);
3022         filp->private_data = iter;
3023
3024         if (iter->trace->pipe_open)
3025                 iter->trace->pipe_open(iter);
3026
3027 out:
3028         mutex_unlock(&trace_types_lock);
3029         return ret;
3030
3031 fail:
3032         kfree(iter->trace);
3033         kfree(iter);
3034         mutex_unlock(&trace_types_lock);
3035         return ret;
3036 }
3037
3038 static int tracing_release_pipe(struct inode *inode, struct file *file)
3039 {
3040         struct trace_iterator *iter = file->private_data;
3041
3042         mutex_lock(&trace_types_lock);
3043
3044         if (iter->trace->pipe_close)
3045                 iter->trace->pipe_close(iter);
3046
3047         mutex_unlock(&trace_types_lock);
3048
3049         free_cpumask_var(iter->started);
3050         mutex_destroy(&iter->mutex);
3051         kfree(iter->trace);
3052         kfree(iter);
3053
3054         return 0;
3055 }
3056
3057 static unsigned int
3058 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
3059 {
3060         struct trace_iterator *iter = filp->private_data;
3061
3062         if (trace_flags & TRACE_ITER_BLOCK) {
3063                 /*
3064                  * Always select as readable when in blocking mode
3065                  */
3066                 return POLLIN | POLLRDNORM;
3067         } else {
3068                 if (!trace_empty(iter))
3069                         return POLLIN | POLLRDNORM;
3070                 poll_wait(filp, &trace_wait, poll_table);
3071                 if (!trace_empty(iter))
3072                         return POLLIN | POLLRDNORM;
3073
3074                 return 0;
3075         }
3076 }
3077
3078
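/*
 * Default wait_pipe callback: sleep interruptibly on trace_wait until
 * there is something to read or a signal arrives.
 */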
3079 void default_wait_pipe(struct trace_iterator *iter)
3080 {
3081         DEFINE_WAIT(wait);
3082
3083         prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
3084
3085         if (trace_empty(iter))
3086                 schedule();
3087
3088         finish_wait(&trace_wait, &wait);
3089 }
3090
3091 /*
3092  * This is a makeshift waitqueue.
3093  * A tracer might use this callback in some rare cases:
3094  *
3095  *  1) the current tracer might hold the runqueue lock when it wakes up
3096  *     a reader, hence a deadlock (sched, function, and function graph tracers)
3097  *  2) the function tracers trace all functions; we don't want
3098  *     the overhead of calling wake_up and friends
3099  *     (and tracing them too)
3100  *
3101  *     Anyway, this is really a very primitive wakeup.
3102  */
3103 void poll_wait_pipe(struct trace_iterator *iter)
3104 {
3105         set_current_state(TASK_INTERRUPTIBLE);
3106         /* sleep for 100 msecs, and try again. */
3107         schedule_timeout(HZ / 10);
3108 }
3109
3110 /* Must be called with iter->mutex held. */
3111 static int tracing_wait_pipe(struct file *filp)
3112 {
3113         struct trace_iterator *iter = filp->private_data;
3114
3115         while (trace_empty(iter)) {
3116
3117                 if ((filp->f_flags & O_NONBLOCK)) {
3118                         return -EAGAIN;
3119                 }
3120
3121                 mutex_unlock(&iter->mutex);
3122
3123                 iter->trace->wait_pipe(iter);
3124
3125                 mutex_lock(&iter->mutex);
3126
3127                 if (signal_pending(current))
3128                         return -EINTR;
3129
3130                 /*
3131                  * We block until we read something and tracing is disabled.
3132                  * We still block if tracing is disabled, but we have never
3133                  * read anything. This allows a user to cat this file, and
3134                  * then enable tracing. But after we have read something,
3135                  * we give an EOF when tracing is again disabled.
3136                  *
3137                  * iter->pos will be 0 if we haven't read anything.
3138                  */
3139                 if (!tracer_enabled && iter->pos)
3140                         break;
3141         }
3142
3143         return 1;
3144 }
3145
3146 /*
3147  * Consumer reader.
3148  */
3149 static ssize_t
3150 tracing_read_pipe(struct file *filp, char __user *ubuf,
3151                   size_t cnt, loff_t *ppos)
3152 {
3153         struct trace_iterator *iter = filp->private_data;
3154         static struct tracer *old_tracer;
3155         ssize_t sret;
3156
3157         /* return any leftover data */
3158         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
3159         if (sret != -EBUSY)
3160                 return sret;
3161
3162         trace_seq_init(&iter->seq);
3163
3164         /* copy the tracer to avoid using a global lock all around */
3165         mutex_lock(&trace_types_lock);
3166         if (unlikely(old_tracer != current_trace && current_trace)) {
3167                 old_tracer = current_trace;
3168                 *iter->trace = *current_trace;
3169         }
3170         mutex_unlock(&trace_types_lock);
3171
3172         /*
3173          * Avoid more than one consumer on a single file descriptor.
3174          * This is just a matter of trace coherency; the ring buffer itself
3175          * is protected.
3176          */
3177         mutex_lock(&iter->mutex);
3178         if (iter->trace->read) {
3179                 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
3180                 if (sret)
3181                         goto out;
3182         }
3183
3184 waitagain:
3185         sret = tracing_wait_pipe(filp);
3186         if (sret <= 0)
3187                 goto out;
3188
3189         /* stop when tracing is finished */
3190         if (trace_empty(iter)) {
3191                 sret = 0;
3192                 goto out;
3193         }
3194
3195         if (cnt >= PAGE_SIZE)
3196                 cnt = PAGE_SIZE - 1;
3197
3198         /* reset all but tr, trace, and overruns */
3199         memset(&iter->seq, 0,
3200                sizeof(struct trace_iterator) -
3201                offsetof(struct trace_iterator, seq));
3202         iter->pos = -1;
3203
3204         trace_event_read_lock();
3205         trace_access_lock(iter->cpu_file);
3206         while (find_next_entry_inc(iter) != NULL) {
3207                 enum print_line_t ret;
3208                 int len = iter->seq.len;
3209
3210                 ret = print_trace_line(iter);
3211                 if (ret == TRACE_TYPE_PARTIAL_LINE) {
3212                         /* don't print partial lines */
3213                         iter->seq.len = len;
3214                         break;
3215                 }
3216                 if (ret != TRACE_TYPE_NO_CONSUME)
3217                         trace_consume(iter);
3218
3219                 if (iter->seq.len >= cnt)
3220                         break;
3221         }
3222         trace_access_unlock(iter->cpu_file);
3223         trace_event_read_unlock();
3224
3225         /* Now copy what we have to the user */
3226         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
3227         if (iter->seq.readpos >= iter->seq.len)
3228                 trace_seq_init(&iter->seq);
3229
3230         /*
3231          * If there was nothing to send to the user, in spite of consuming
3232          * trace entries, go back to wait for more entries.
3233          */
3234         if (sret == -EBUSY)
3235                 goto waitagain;
3236
3237 out:
3238         mutex_unlock(&iter->mutex);
3239
3240         return sret;
3241 }
3242
3243 static void tracing_pipe_buf_release(struct pipe_inode_info *pipe,
3244                                      struct pipe_buffer *buf)
3245 {
3246         __free_page(buf->page);
3247 }
3248
3249 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
3250                                      unsigned int idx)
3251 {
3252         __free_page(spd->pages[idx]);
3253 }
3254
3255 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
3256         .can_merge              = 0,
3257         .map                    = generic_pipe_buf_map,
3258         .unmap                  = generic_pipe_buf_unmap,
3259         .confirm                = generic_pipe_buf_confirm,
3260         .release                = tracing_pipe_buf_release,
3261         .steal                  = generic_pipe_buf_steal,
3262         .get                    = generic_pipe_buf_get,
3263 };
3264
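/*
 * Fill iter->seq with as many formatted trace lines as fit within @rem
 * bytes (the seq buffer itself is one page). Returns how many of the
 * @rem bytes are still unused.
 */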
3265 static size_t
3266 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
3267 {
3268         size_t count;
3269         int ret;
3270
3271         /* Seq buffer is page-sized, exactly what we need. */
3272         for (;;) {
3273                 count = iter->seq.len;
3274                 ret = print_trace_line(iter);
3275                 count = iter->seq.len - count;
3276                 if (rem < count) {
3277                         rem = 0;
3278                         iter->seq.len -= count;
3279                         break;
3280                 }
3281                 if (ret == TRACE_TYPE_PARTIAL_LINE) {
3282                         iter->seq.len -= count;
3283                         break;
3284                 }
3285
3286                 if (ret != TRACE_TYPE_NO_CONSUME)
3287                         trace_consume(iter);
3288                 rem -= count;
3289                 if (!find_next_entry_inc(iter)) {
3290                         rem = 0;
3291                         iter->ent = NULL;
3292                         break;
3293                 }
3294         }
3295
3296         return rem;
3297 }
3298
3299 static ssize_t tracing_splice_read_pipe(struct file *filp,
3300                                         loff_t *ppos,
3301                                         struct pipe_inode_info *pipe,
3302                                         size_t len,
3303                                         unsigned int flags)
3304 {
3305         struct page *pages_def[PIPE_DEF_BUFFERS];
3306         struct partial_page partial_def[PIPE_DEF_BUFFERS];
3307         struct trace_iterator *iter = filp->private_data;
3308         struct splice_pipe_desc spd = {
3309                 .pages          = pages_def,
3310                 .partial        = partial_def,
3311                 .nr_pages       = 0, /* This gets updated below. */
3312                 .flags          = flags,
3313                 .ops            = &tracing_pipe_buf_ops,
3314                 .spd_release    = tracing_spd_release_pipe,
3315         };
3316         static struct tracer *old_tracer;
3317         ssize_t ret;
3318         size_t rem;
3319         unsigned int i;
3320
3321         if (splice_grow_spd(pipe, &spd))
3322                 return -ENOMEM;
3323
3324         /* copy the tracer to avoid using a global lock all around */
3325         mutex_lock(&trace_types_lock);
3326         if (unlikely(old_tracer != current_trace && current_trace)) {
3327                 old_tracer = current_trace;
3328                 *iter->trace = *current_trace;
3329         }
3330         mutex_unlock(&trace_types_lock);
3331
3332         mutex_lock(&iter->mutex);
3333
3334         if (iter->trace->splice_read) {
3335                 ret = iter->trace->splice_read(iter, filp,
3336                                                ppos, pipe, len, flags);
3337                 if (ret)
3338                         goto out_err;
3339         }
3340
3341         ret = tracing_wait_pipe(filp);
3342         if (ret <= 0)
3343                 goto out_err;
3344
3345         if (!iter->ent && !find_next_entry_inc(iter)) {
3346                 ret = -EFAULT;
3347                 goto out_err;
3348         }
3349
3350         trace_event_read_lock();
3351         trace_access_lock(iter->cpu_file);
3352
3353         /* Fill as many pages as possible. */
3354         for (i = 0, rem = len; i < pipe->buffers && rem; i++) {
3355                 spd.pages[i] = alloc_page(GFP_KERNEL);
3356                 if (!spd.pages[i])
3357                         break;
3358
3359                 rem = tracing_fill_pipe_page(rem, iter);
3360
3361                 /* Copy the data into the page, so we can start over. */
3362                 ret = trace_seq_to_buffer(&iter->seq,
3363                                           page_address(spd.pages[i]),
3364                                           iter->seq.len);
3365                 if (ret < 0) {
3366                         __free_page(spd.pages[i]);
3367                         break;
3368                 }
3369                 spd.partial[i].offset = 0;
3370                 spd.partial[i].len = iter->seq.len;
3371
3372                 trace_seq_init(&iter->seq);
3373         }
3374
3375         trace_access_unlock(iter->cpu_file);
3376         trace_event_read_unlock();
3377         mutex_unlock(&iter->mutex);
3378
3379         spd.nr_pages = i;
3380
3381         ret = splice_to_pipe(pipe, &spd);
3382 out:
3383         splice_shrink_spd(pipe, &spd);
3384         return ret;
3385
3386 out_err:
3387         mutex_unlock(&iter->mutex);
3388         goto out;
3389 }
3390
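/*
 * These two handlers back the "buffer_size_kb" file.  Reading shows the
 * per-cpu buffer size in KB (and, before the first expansion, the size
 * it would grow to); writing resizes every cpu buffer.  For example
 * (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	# echo 4096 > /sys/kernel/debug/tracing/buffer_size_kb
 */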
3391 static ssize_t
3392 tracing_entries_read(struct file *filp, char __user *ubuf,
3393                      size_t cnt, loff_t *ppos)
3394 {
3395         struct trace_array *tr = filp->private_data;
3396         char buf[96];
3397         int r;
3398
3399         mutex_lock(&trace_types_lock);
3400         if (!ring_buffer_expanded)
3401                 r = sprintf(buf, "%lu (expanded: %lu)\n",
3402                             tr->entries >> 10,
3403                             trace_buf_size >> 10);
3404         else
3405                 r = sprintf(buf, "%lu\n", tr->entries >> 10);
3406         mutex_unlock(&trace_types_lock);
3407
3408         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3409 }
3410
3411 static ssize_t
3412 tracing_entries_write(struct file *filp, const char __user *ubuf,
3413                       size_t cnt, loff_t *ppos)
3414 {
3415         unsigned long val;
3416         char buf[64];
3417         int ret, cpu;
3418
3419         if (cnt >= sizeof(buf))
3420                 return -EINVAL;
3421
3422         if (copy_from_user(&buf, ubuf, cnt))
3423                 return -EFAULT;
3424
3425         buf[cnt] = 0;
3426
3427         ret = strict_strtoul(buf, 10, &val);
3428         if (ret < 0)
3429                 return ret;
3430
3431         /* must have at least 1 entry */
3432         if (!val)
3433                 return -EINVAL;
3434
3435         mutex_lock(&trace_types_lock);
3436
3437         tracing_stop();
3438
3439         /* disable all cpu buffers */
3440         for_each_tracing_cpu(cpu) {
3441                 if (global_trace.data[cpu])
3442                         atomic_inc(&global_trace.data[cpu]->disabled);
3443                 if (max_tr.data[cpu])
3444                         atomic_inc(&max_tr.data[cpu]->disabled);
3445         }
3446
3447         /* value is in KB */
3448         val <<= 10;
3449
3450         if (val != global_trace.entries) {
3451                 ret = tracing_resize_ring_buffer(val);
3452                 if (ret < 0) {
3453                         cnt = ret;
3454                         goto out;
3455                 }
3456         }
3457
3458         *ppos += cnt;
3459
3460         /* If the ring buffer page check failed (tracing disabled), return ENOMEM */
3461         if (tracing_disabled)
3462                 cnt = -ENOMEM;
3463  out:
3464         for_each_tracing_cpu(cpu) {
3465                 if (global_trace.data[cpu])
3466                         atomic_dec(&global_trace.data[cpu]->disabled);
3467                 if (max_tr.data[cpu])
3468                         atomic_dec(&max_tr.data[cpu]->disabled);
3469         }
3470
3471         tracing_start();
3472         max_tr.entries = global_trace.entries;
3473         mutex_unlock(&trace_types_lock);
3474
3475         return cnt;
3476 }
3477
3478 static int mark_printk(const char *fmt, ...)
3479 {
3480         int ret;
3481         va_list args;
3482         va_start(args, fmt);
3483         ret = trace_vprintk(0, fmt, args);
3484         va_end(args);
3485         return ret;
3486 }
3487
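/*
 * Write handler for the "trace_marker" file: anything written from user
 * space is injected into the trace buffer via trace_vprintk().  Typical
 * use (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	# echo "hit the interesting spot" > /sys/kernel/debug/tracing/trace_marker
 */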
3488 static ssize_t
3489 tracing_mark_write(struct file *filp, const char __user *ubuf,
3490                                         size_t cnt, loff_t *fpos)
3491 {
3492         char *buf;
3493
3494         if (tracing_disabled)
3495                 return -EINVAL;
3496
3497         if (cnt > TRACE_BUF_SIZE)
3498                 cnt = TRACE_BUF_SIZE;
3499
3500         buf = kmalloc(cnt + 2, GFP_KERNEL);
3501         if (buf == NULL)
3502                 return -ENOMEM;
3503
3504         if (copy_from_user(buf, ubuf, cnt)) {
3505                 kfree(buf);
3506                 return -EFAULT;
3507         }
3508         if (buf[cnt-1] != '\n') {
3509                 buf[cnt] = '\n';
3510                 buf[cnt+1] = '\0';
3511         } else
3512                 buf[cnt] = '\0';
3513
3514         cnt = mark_printk("%s", buf);
3515         kfree(buf);
3516         *fpos += cnt;
3517
3518         return cnt;
3519 }
3520
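/*
 * The "trace_clock" file: reading lists the available clocks with the
 * current one in brackets (e.g. "[local] global"); writing one of the
 * listed names selects the clock used to timestamp ring buffer events.
 *
 *	# cat /sys/kernel/debug/tracing/trace_clock
 *	[local] global
 *	# echo global > /sys/kernel/debug/tracing/trace_clock
 */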
3521 static int tracing_clock_show(struct seq_file *m, void *v)
3522 {
3523         int i;
3524
3525         for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
3526                 seq_printf(m,
3527                         "%s%s%s%s", i ? " " : "",
3528                         i == trace_clock_id ? "[" : "", trace_clocks[i].name,
3529                         i == trace_clock_id ? "]" : "");
3530         seq_putc(m, '\n');
3531
3532         return 0;
3533 }
3534
3535 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
3536                                    size_t cnt, loff_t *fpos)
3537 {
3538         char buf[64];
3539         const char *clockstr;
3540         int i;
3541
3542         if (cnt >= sizeof(buf))
3543                 return -EINVAL;
3544
3545         if (copy_from_user(&buf, ubuf, cnt))
3546                 return -EFAULT;
3547
3548         buf[cnt] = 0;
3549
3550         clockstr = strstrip(buf);
3551
3552         for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
3553                 if (strcmp(trace_clocks[i].name, clockstr) == 0)
3554                         break;
3555         }
3556         if (i == ARRAY_SIZE(trace_clocks))
3557                 return -EINVAL;
3558
3559         trace_clock_id = i;
3560
3561         mutex_lock(&trace_types_lock);
3562
3563         ring_buffer_set_clock(global_trace.buffer, trace_clocks[i].func);
3564         if (max_tr.buffer)
3565                 ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func);
3566
3567         mutex_unlock(&trace_types_lock);
3568
3569         *fpos += cnt;
3570
3571         return cnt;
3572 }
3573
3574 static int tracing_clock_open(struct inode *inode, struct file *file)
3575 {
3576         if (tracing_disabled)
3577                 return -ENODEV;
3578         return single_open(file, tracing_clock_show, NULL);
3579 }
3580
3581 static const struct file_operations tracing_max_lat_fops = {
3582         .open           = tracing_open_generic,
3583         .read           = tracing_max_lat_read,
3584         .write          = tracing_max_lat_write,
3585 };
3586
3587 static const struct file_operations tracing_ctrl_fops = {
3588         .open           = tracing_open_generic,
3589         .read           = tracing_ctrl_read,
3590         .write          = tracing_ctrl_write,
3591 };
3592
3593 static const struct file_operations set_tracer_fops = {
3594         .open           = tracing_open_generic,
3595         .read           = tracing_set_trace_read,
3596         .write          = tracing_set_trace_write,
3597 };
3598
3599 static const struct file_operations tracing_pipe_fops = {
3600         .open           = tracing_open_pipe,
3601         .poll           = tracing_poll_pipe,
3602         .read           = tracing_read_pipe,
3603         .splice_read    = tracing_splice_read_pipe,
3604         .release        = tracing_release_pipe,
3605 };
3606
3607 static const struct file_operations tracing_entries_fops = {
3608         .open           = tracing_open_generic,
3609         .read           = tracing_entries_read,
3610         .write          = tracing_entries_write,
3611 };
3612
3613 static const struct file_operations tracing_mark_fops = {
3614         .open           = tracing_open_generic,
3615         .write          = tracing_mark_write,
3616 };
3617
3618 static const struct file_operations trace_clock_fops = {
3619         .open           = tracing_clock_open,
3620         .read           = seq_read,
3621         .llseek         = seq_lseek,
3622         .release        = single_release,
3623         .write          = tracing_clock_write,
3624 };
3625
3626 struct ftrace_buffer_info {
3627         struct trace_array      *tr;
3628         void                    *spare;
3629         int                     cpu;
3630         unsigned int            read;
3631 };
3632
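/*
 * The per_cpu/cpuN/trace_pipe_raw file hands whole ring buffer pages to
 * user space in binary form, either via read() (one page at a time
 * through info->spare) or zero-copy via splice() further below.  Binary
 * readers such as trace-cmd typically use this instead of the text files.
 */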
3633 static int tracing_buffers_open(struct inode *inode, struct file *filp)
3634 {
3635         int cpu = (int)(long)inode->i_private;
3636         struct ftrace_buffer_info *info;
3637
3638         if (tracing_disabled)
3639                 return -ENODEV;
3640
3641         info = kzalloc(sizeof(*info), GFP_KERNEL);
3642         if (!info)
3643                 return -ENOMEM;
3644
3645         info->tr        = &global_trace;
3646         info->cpu       = cpu;
3647         info->spare     = NULL;
3648         /* Force a ring buffer read on the very first read() */
3649         info->read      = (unsigned int)-1;
3650
3651         filp->private_data = info;
3652
3653         return nonseekable_open(inode, filp);
3654 }
3655
3656 static ssize_t
3657 tracing_buffers_read(struct file *filp, char __user *ubuf,
3658                      size_t count, loff_t *ppos)
3659 {
3660         struct ftrace_buffer_info *info = filp->private_data;
3661         ssize_t ret;
3662         size_t size;
3663
3664         if (!count)
3665                 return 0;
3666
3667         if (!info->spare)
3668                 info->spare = ring_buffer_alloc_read_page(info->tr->buffer);
3669         if (!info->spare)
3670                 return -ENOMEM;
3671
3672         /* Do we have previous read data to read? */
3673         if (info->read < PAGE_SIZE)
3674                 goto read;
3675
3676         info->read = 0;
3677
3678         trace_access_lock(info->cpu);
3679         ret = ring_buffer_read_page(info->tr->buffer,
3680                                     &info->spare,
3681                                     count,
3682                                     info->cpu, 0);
3683         trace_access_unlock(info->cpu);
3684         if (ret < 0)
3685                 return 0;
3686
3687 read:
3688         size = PAGE_SIZE - info->read;
3689         if (size > count)
3690                 size = count;
3691
3692         ret = copy_to_user(ubuf, info->spare + info->read, size);
3693         if (ret == size)
3694                 return -EFAULT;
3695         size -= ret;
3696
3697         *ppos += size;
3698         info->read += size;
3699
3700         return size;
3701 }
3702
3703 static int tracing_buffers_release(struct inode *inode, struct file *file)
3704 {
3705         struct ftrace_buffer_info *info = file->private_data;
3706
3707         if (info->spare)
3708                 ring_buffer_free_read_page(info->tr->buffer, info->spare);
3709         kfree(info);
3710
3711         return 0;
3712 }
3713
3714 struct buffer_ref {
3715         struct ring_buffer      *buffer;
3716         void                    *page;
3717         int                     ref;
3718 };
3719
3720 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
3721                                     struct pipe_buffer *buf)
3722 {
3723         struct buffer_ref *ref = (struct buffer_ref *)buf->private;
3724
3725         if (--ref->ref)
3726                 return;
3727
3728         ring_buffer_free_read_page(ref->buffer, ref->page);
3729         kfree(ref);
3730         buf->private = 0;
3731 }
3732
3733 static int buffer_pipe_buf_steal(struct pipe_inode_info *pipe,
3734                                  struct pipe_buffer *buf)
3735 {
3736         return 1;
3737 }
3738
3739 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
3740                                 struct pipe_buffer *buf)
3741 {
3742         struct buffer_ref *ref = (struct buffer_ref *)buf->private;
3743
3744         ref->ref++;
3745 }
3746
3747 /* Pipe buffer operations for ring buffer pages spliced into a pipe. */
3748 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
3749         .can_merge              = 0,
3750         .map                    = generic_pipe_buf_map,
3751         .unmap                  = generic_pipe_buf_unmap,
3752         .confirm                = generic_pipe_buf_confirm,
3753         .release                = buffer_pipe_buf_release,
3754         .steal                  = buffer_pipe_buf_steal,
3755         .get                    = buffer_pipe_buf_get,
3756 };
3757
3758 /*
3759  * Callback from splice_to_pipe(): release the page references held in
3760  * the spd if we errored out while filling the pipe.
3761  */
3762 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
3763 {
3764         struct buffer_ref *ref =
3765                 (struct buffer_ref *)spd->partial[i].private;
3766
3767         if (--ref->ref)
3768                 return;
3769
3770         ring_buffer_free_read_page(ref->buffer, ref->page);
3771         kfree(ref);
3772         spd->partial[i].private = 0;
3773 }
3774
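/*
 * splice() support for trace_pipe_raw: each ring buffer page is wrapped
 * in a refcounted buffer_ref and linked into the pipe without copying.
 * The page is handed back to the ring buffer only when the last
 * reference is dropped (buffer_pipe_buf_release() or buffer_spd_release()).
 */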
3775 static ssize_t
3776 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
3777                             struct pipe_inode_info *pipe, size_t len,
3778                             unsigned int flags)
3779 {
3780         struct ftrace_buffer_info *info = file->private_data;
3781         struct partial_page partial_def[PIPE_DEF_BUFFERS];
3782         struct page *pages_def[PIPE_DEF_BUFFERS];
3783         struct splice_pipe_desc spd = {
3784                 .pages          = pages_def,
3785                 .partial        = partial_def,
3786                 .flags          = flags,
3787                 .ops            = &buffer_pipe_buf_ops,
3788                 .spd_release    = buffer_spd_release,
3789         };
3790         struct buffer_ref *ref;
3791         int entries, size, i;
3792         size_t ret;
3793
3794         if (splice_grow_spd(pipe, &spd))
3795                 return -ENOMEM;
3796
3797         if (*ppos & (PAGE_SIZE - 1)) {
3798                 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
3799                 ret = -EINVAL;
3800                 goto out;
3801         }
3802
3803         if (len & (PAGE_SIZE - 1)) {
3804                 WARN_ONCE(1, "Ftrace: splice_read should page-align\n");
3805                 if (len < PAGE_SIZE) {
3806                         ret = -EINVAL;
3807                         goto out;
3808                 }
3809                 len &= PAGE_MASK;
3810         }
3811
3812         trace_access_lock(info->cpu);
3813         entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
3814
3815         for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
3816                 struct page *page;
3817                 int r;
3818
3819                 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
3820                 if (!ref)
3821                         break;
3822
3823                 ref->ref = 1;
3824                 ref->buffer = info->tr->buffer;
3825                 ref->page = ring_buffer_alloc_read_page(ref->buffer);
3826                 if (!ref->page) {
3827                         kfree(ref);
3828                         break;
3829                 }
3830
3831                 r = ring_buffer_read_page(ref->buffer, &ref->page,
3832                                           len, info->cpu, 1);
3833                 if (r < 0) {
3834                         ring_buffer_free_read_page(ref->buffer,
3835                                                    ref->page);
3836                         kfree(ref);
3837                         break;
3838                 }
3839
3840                 /*
3841                  * Zero out any leftover data; this page is going to
3842                  * user land.
3843                  */
3844                 size = ring_buffer_page_len(ref->page);
3845                 if (size < PAGE_SIZE)
3846                         memset(ref->page + size, 0, PAGE_SIZE - size);
3847
3848                 page = virt_to_page(ref->page);
3849
3850                 spd.pages[i] = page;
3851                 spd.partial[i].len = PAGE_SIZE;
3852                 spd.partial[i].offset = 0;
3853                 spd.partial[i].private = (unsigned long)ref;
3854                 spd.nr_pages++;
3855                 *ppos += PAGE_SIZE;
3856
3857                 entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
3858         }
3859
3860         trace_access_unlock(info->cpu);
3861         spd.nr_pages = i;
3862
3863         /* did we read anything? */
3864         if (!spd.nr_pages) {
3865                 if (flags & SPLICE_F_NONBLOCK)
3866                         ret = -EAGAIN;
3867                 else
3868                         ret = 0;
3869                 /* TODO: block */
3870                 goto out;
3871         }
3872
3873         ret = splice_to_pipe(pipe, &spd);
3874         splice_shrink_spd(pipe, &spd);
3875 out:
3876         return ret;
3877 }
3878
3879 static const struct file_operations tracing_buffers_fops = {
3880         .open           = tracing_buffers_open,
3881         .read           = tracing_buffers_read,
3882         .release        = tracing_buffers_release,
3883         .splice_read    = tracing_buffers_splice_read,
3884         .llseek         = no_llseek,
3885 };
3886
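/*
 * The per_cpu/cpuN/stats file reports simple counters for one cpu
 * buffer, e.g.:
 *
 *	entries: 1024
 *	overrun: 0
 *	commit overrun: 0
 */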
3887 static ssize_t
3888 tracing_stats_read(struct file *filp, char __user *ubuf,
3889                    size_t count, loff_t *ppos)
3890 {
3891         unsigned long cpu = (unsigned long)filp->private_data;
3892         struct trace_array *tr = &global_trace;
3893         struct trace_seq *s;
3894         unsigned long cnt;
3895
3896         s = kmalloc(sizeof(*s), GFP_KERNEL);
3897         if (!s)
3898                 return -ENOMEM;
3899
3900         trace_seq_init(s);
3901
3902         cnt = ring_buffer_entries_cpu(tr->buffer, cpu);
3903         trace_seq_printf(s, "entries: %ld\n", cnt);
3904
3905         cnt = ring_buffer_overrun_cpu(tr->buffer, cpu);
3906         trace_seq_printf(s, "overrun: %ld\n", cnt);
3907
3908         cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu);
3909         trace_seq_printf(s, "commit overrun: %ld\n", cnt);
3910
3911         count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
3912
3913         kfree(s);
3914
3915         return count;
3916 }
3917
3918 static const struct file_operations tracing_stats_fops = {
3919         .open           = tracing_open_generic,
3920         .read           = tracing_stats_read,
3921 };
3922
3923 #ifdef CONFIG_DYNAMIC_FTRACE
3924
3925 int __weak ftrace_arch_read_dyn_info(char *buf, int size)
3926 {
3927         return 0;
3928 }
3929
3930 static ssize_t
3931 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
3932                   size_t cnt, loff_t *ppos)
3933 {
3934         static char ftrace_dyn_info_buffer[1024];
3935         static DEFINE_MUTEX(dyn_info_mutex);
3936         unsigned long *p = filp->private_data;
3937         char *buf = ftrace_dyn_info_buffer;
3938         int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
3939         int r;
3940
3941         mutex_lock(&dyn_info_mutex);
3942         r = sprintf(buf, "%ld ", *p);
3943
3944         r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
3945         buf[r++] = '\n';
3946
3947         r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3948
3949         mutex_unlock(&dyn_info_mutex);
3950
3951         return r;
3952 }
3953
3954 static const struct file_operations tracing_dyn_info_fops = {
3955         .open           = tracing_open_generic,
3956         .read           = tracing_read_dyn_info,
3957 };
3958 #endif
3959
3960 static struct dentry *d_tracer;
3961
3962 struct dentry *tracing_init_dentry(void)
3963 {
3964         static int once;
3965
3966         if (d_tracer)
3967                 return d_tracer;
3968
3969         if (!debugfs_initialized())
3970                 return NULL;
3971
3972         d_tracer = debugfs_create_dir("tracing", NULL);
3973
3974         if (!d_tracer && !once) {
3975                 once = 1;
3976                 pr_warning("Could not create debugfs directory 'tracing'\n");
3977                 return NULL;
3978         }
3979
3980         return d_tracer;
3981 }
3982
3983 static struct dentry *d_percpu;
3984
3985 struct dentry *tracing_dentry_percpu(void)
3986 {
3987         static int once;
3988         struct dentry *d_tracer;
3989
3990         if (d_percpu)
3991                 return d_percpu;
3992
3993         d_tracer = tracing_init_dentry();
3994
3995         if (!d_tracer)
3996                 return NULL;
3997
3998         d_percpu = debugfs_create_dir("per_cpu", d_tracer);
3999
4000         if (!d_percpu && !once) {
4001                 once = 1;
4002                 pr_warning("Could not create debugfs directory 'per_cpu'\n");
4003                 return NULL;
4004         }
4005
4006         return d_percpu;
4007 }
4008
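/*
 * Create the per-cpu debugfs entries, e.g. for cpu 0:
 *
 *	tracing/per_cpu/cpu0/{trace_pipe,trace,trace_pipe_raw,stats}
 */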
4009 static void tracing_init_debugfs_percpu(long cpu)
4010 {
4011         struct dentry *d_percpu = tracing_dentry_percpu();
4012         struct dentry *d_cpu;
4013         /* strlen("cpu") + up to 3 digits (cpu <= 999) + '\0' */
4014         char cpu_dir[7];
4015
4016         if (cpu > 999 || cpu < 0)
4017                 return;
4018
4019         sprintf(cpu_dir, "cpu%ld", cpu);
4020         d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
4021         if (!d_cpu) {
4022                 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
4023                 return;
4024         }
4025
4026         /* per cpu trace_pipe */
4027         trace_create_file("trace_pipe", 0444, d_cpu,
4028                         (void *) cpu, &tracing_pipe_fops);
4029
4030         /* per cpu trace */
4031         trace_create_file("trace", 0644, d_cpu,
4032                         (void *) cpu, &tracing_fops);
4033
4034         trace_create_file("trace_pipe_raw", 0444, d_cpu,
4035                         (void *) cpu, &tracing_buffers_fops);
4036
4037         trace_create_file("stats", 0444, d_cpu,
4038                         (void *) cpu, &tracing_stats_fops);
4039 }
4040
4041 #ifdef CONFIG_FTRACE_SELFTEST
4042 /* Let selftest have access to static functions in this file */
4043 #include "trace_selftest.c"
4044 #endif
4045
4046 struct trace_option_dentry {
4047         struct tracer_opt               *opt;
4048         struct tracer_flags             *flags;
4049         struct dentry                   *entry;
4050 };
4051
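/*
 * Every flag under tracing/options/ is exposed as its own file that
 * reads back "0" or "1"; writing either value sets or clears the flag.
 * The handlers below serve tracer-specific flags (struct tracer_opt);
 * the *_core_* variants further down serve the global trace_flags bits.
 *
 *	# echo 1 > /sys/kernel/debug/tracing/options/<flag>
 */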
4052 static ssize_t
4053 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
4054                         loff_t *ppos)
4055 {
4056         struct trace_option_dentry *topt = filp->private_data;
4057         char *buf;
4058
4059         if (topt->flags->val & topt->opt->bit)
4060                 buf = "1\n";
4061         else
4062                 buf = "0\n";
4063
4064         return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
4065 }
4066
4067 static ssize_t
4068 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
4069                          loff_t *ppos)
4070 {
4071         struct trace_option_dentry *topt = filp->private_data;
4072         unsigned long val;
4073         char buf[64];
4074         int ret;
4075
4076         if (cnt >= sizeof(buf))
4077                 return -EINVAL;
4078
4079         if (copy_from_user(&buf, ubuf, cnt))
4080                 return -EFAULT;
4081
4082         buf[cnt] = 0;
4083
4084         ret = strict_strtoul(buf, 10, &val);
4085         if (ret < 0)
4086                 return ret;
4087
4088         if (val != 0 && val != 1)
4089                 return -EINVAL;
4090
4091         if (!!(topt->flags->val & topt->opt->bit) != val) {
4092                 mutex_lock(&trace_types_lock);
4093                 ret = __set_tracer_option(current_trace, topt->flags,
4094                                           topt->opt, !val);
4095                 mutex_unlock(&trace_types_lock);
4096                 if (ret)
4097                         return ret;
4098         }
4099
4100         *ppos += cnt;
4101
4102         return cnt;
4103 }
4104
4105
4106 static const struct file_operations trace_options_fops = {
4107         .open = tracing_open_generic,
4108         .read = trace_options_read,
4109         .write = trace_options_write,
4110 };
4111
4112 static ssize_t
4113 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
4114                         loff_t *ppos)
4115 {
4116         long index = (long)filp->private_data;
4117         char *buf;
4118
4119         if (trace_flags & (1 << index))
4120                 buf = "1\n";
4121         else
4122                 buf = "0\n";
4123
4124         return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
4125 }
4126
4127 static ssize_t
4128 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
4129                          loff_t *ppos)
4130 {
4131         long index = (long)filp->private_data;
4132         char buf[64];
4133         unsigned long val;
4134         int ret;
4135
4136         if (cnt >= sizeof(buf))
4137                 return -EINVAL;
4138
4139         if (copy_from_user(&buf, ubuf, cnt))
4140                 return -EFAULT;
4141
4142         buf[cnt] = 0;
4143
4144         ret = strict_strtoul(buf, 10, &val);
4145         if (ret < 0)
4146                 return ret;
4147
4148         if (val != 0 && val != 1)
4149                 return -EINVAL;
4150         set_tracer_flags(1 << index, val);
4151
4152         *ppos += cnt;
4153
4154         return cnt;
4155 }
4156
4157 static const struct file_operations trace_options_core_fops = {
4158         .open = tracing_open_generic,
4159         .read = trace_options_core_read,
4160         .write = trace_options_core_write,
4161 };
4162
4163 struct dentry *trace_create_file(const char *name,
4164                                  mode_t mode,
4165                                  struct dentry *parent,
4166                                  void *data,
4167                                  const struct file_operations *fops)
4168 {
4169         struct dentry *ret;
4170
4171         ret = debugfs_create_file(name, mode, parent, data, fops);
4172         if (!ret)
4173                 pr_warning("Could not create debugfs '%s' entry\n", name);
4174
4175         return ret;
4176 }
4177
4178
4179 static struct dentry *trace_options_init_dentry(void)
4180 {
4181         struct dentry *d_tracer;
4182         static struct dentry *t_options;
4183
4184         if (t_options)
4185                 return t_options;
4186
4187         d_tracer = tracing_init_dentry();
4188         if (!d_tracer)
4189                 return NULL;
4190
4191         t_options = debugfs_create_dir("options", d_tracer);
4192         if (!t_options) {
4193                 pr_warning("Could not create debugfs directory 'options'\n");
4194                 return NULL;
4195         }
4196
4197         return t_options;
4198 }
4199
4200 static void
4201 create_trace_option_file(struct trace_option_dentry *topt,
4202                          struct tracer_flags *flags,
4203                          struct tracer_opt *opt)
4204 {
4205         struct dentry *t_options;
4206
4207         t_options = trace_options_init_dentry();
4208         if (!t_options)
4209                 return;
4210
4211         topt->flags = flags;
4212         topt->opt = opt;
4213
4214         topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
4215                                     &trace_options_fops);
4216
4217 }
4218
4219 static struct trace_option_dentry *
4220 create_trace_option_files(struct tracer *tracer)
4221 {
4222         struct trace_option_dentry *topts;
4223         struct tracer_flags *flags;
4224         struct tracer_opt *opts;
4225         int cnt;
4226
4227         if (!tracer)
4228                 return NULL;
4229
4230         flags = tracer->flags;
4231
4232         if (!flags || !flags->opts)
4233                 return NULL;
4234
4235         opts = flags->opts;
4236
4237         for (cnt = 0; opts[cnt].name; cnt++)
4238                 ;
4239
4240         topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
4241         if (!topts)
4242                 return NULL;
4243
4244         for (cnt = 0; opts[cnt].name; cnt++)
4245                 create_trace_option_file(&topts[cnt], flags,
4246                                          &opts[cnt]);
4247
4248         return topts;
4249 }
4250
4251 static void
4252 destroy_trace_option_files(struct trace_option_dentry *topts)
4253 {
4254         int cnt;
4255
4256         if (!topts)
4257                 return;
4258
4259         for (cnt = 0; topts[cnt].opt; cnt++) {
4260                 if (topts[cnt].entry)
4261                         debugfs_remove(topts[cnt].entry);
4262         }
4263
4264         kfree(topts);
4265 }
4266
4267 static struct dentry *
4268 create_trace_option_core_file(const char *option, long index)
4269 {
4270         struct dentry *t_options;
4271
4272         t_options = trace_options_init_dentry();
4273         if (!t_options)
4274                 return NULL;
4275
4276         return trace_create_file(option, 0644, t_options, (void *)index,
4277                                     &trace_options_core_fops);
4278 }
4279
4280 static __init void create_trace_options_dir(void)
4281 {
4282         struct dentry *t_options;
4283         int i;
4284
4285         t_options = trace_options_init_dentry();
4286         if (!t_options)
4287                 return;
4288
4289         for (i = 0; trace_options[i]; i++)
4290                 create_trace_option_core_file(trace_options[i], i);
4291 }
4292
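/*
 * Populate the "tracing" debugfs directory.  After this runs it contains
 * (among others) trace, trace_pipe, current_tracer, available_tracers,
 * buffer_size_kb, trace_marker, trace_options and the per_cpu/
 * subdirectory, all created with trace_create_file() below.
 */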
4293 static __init int tracer_init_debugfs(void)
4294 {
4295         struct dentry *d_tracer;
4296         int cpu;
4297
4298         trace_access_lock_init();
4299
4300         d_tracer = tracing_init_dentry();
4301
4302         trace_create_file("tracing_enabled", 0644, d_tracer,
4303                         &global_trace, &tracing_ctrl_fops);
4304
4305         trace_create_file("trace_options", 0644, d_tracer,
4306                         NULL, &tracing_iter_fops);
4307
4308         trace_create_file("tracing_cpumask", 0644, d_tracer,
4309                         NULL, &tracing_cpumask_fops);
4310
4311         trace_create_file("trace", 0644, d_tracer,
4312                         (void *) TRACE_PIPE_ALL_CPU, &tracing_fops);
4313
4314         trace_create_file("available_tracers", 0444, d_tracer,
4315                         &global_trace, &show_traces_fops);
4316
4317         trace_create_file("current_tracer", 0644, d_tracer,
4318                         &global_trace, &set_tracer_fops);
4319
4320 #ifdef CONFIG_TRACER_MAX_TRACE
4321         trace_create_file("tracing_max_latency", 0644, d_tracer,
4322                         &tracing_max_latency, &tracing_max_lat_fops);
4323 #endif
4324
4325         trace_create_file("tracing_thresh", 0644, d_tracer,
4326                         &tracing_thresh, &tracing_max_lat_fops);
4327
4328         trace_create_file("README", 0444, d_tracer,
4329                         NULL, &tracing_readme_fops);
4330
4331         trace_create_file("trace_pipe", 0444, d_tracer,
4332                         (void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops);
4333
4334         trace_create_file("buffer_size_kb", 0644, d_tracer,
4335                         &global_trace, &tracing_entries_fops);
4336
4337         trace_create_file("trace_marker", 0220, d_tracer,
4338                         NULL, &tracing_mark_fops);
4339
4340         trace_create_file("saved_cmdlines", 0444, d_tracer,
4341                         NULL, &tracing_saved_cmdlines_fops);
4342
4343         trace_create_file("trace_clock", 0644, d_tracer, NULL,
4344                           &trace_clock_fops);
4345
4346 #ifdef CONFIG_DYNAMIC_FTRACE
4347         trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
4348                         &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
4349 #endif
4350 #ifdef CONFIG_SYSPROF_TRACER
4351         init_tracer_sysprof_debugfs(d_tracer);
4352 #endif
4353
4354         create_trace_options_dir();
4355
4356         for_each_tracing_cpu(cpu)
4357                 tracing_init_debugfs_percpu(cpu);
4358
4359         return 0;
4360 }
4361
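/*
 * Panic/die notifiers: when ftrace_dump_on_oops is set (typically via
 * the "ftrace_dump_on_oops" kernel command line option or the matching
 * sysctl), the ftrace buffers are dumped to the console on an oops or
 * panic so the events leading up to the crash are not lost.
 */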
4362 static int trace_panic_handler(struct notifier_block *this,
4363                                unsigned long event, void *unused)
4364 {
4365         if (ftrace_dump_on_oops)
4366                 ftrace_dump(ftrace_dump_on_oops);
4367         return NOTIFY_OK;
4368 }
4369
4370 static struct notifier_block trace_panic_notifier = {
4371         .notifier_call  = trace_panic_handler,
4372         .next           = NULL,
4373         .priority       = 150   /* priority: INT_MAX >= x >= 0 */
4374 };
4375
4376 static int trace_die_handler(struct notifier_block *self,
4377                              unsigned long val,
4378                              void *data)
4379 {
4380         switch (val) {
4381         case DIE_OOPS:
4382                 if (ftrace_dump_on_oops)
4383                         ftrace_dump(ftrace_dump_on_oops);
4384                 break;
4385         default:
4386                 break;
4387         }
4388         return NOTIFY_OK;
4389 }
4390
4391 static struct notifier_block trace_die_notifier = {
4392         .notifier_call = trace_die_handler,
4393         .priority = 200
4394 };
4395
4396 /*
4397  * printk limits a single message to 1024 bytes; we really don't need it
4398  * that big.  Nothing should be printing 1000 characters anyway.
4399  */
4400 #define TRACE_MAX_PRINT         1000
4401
4402 /*
4403  * Define here KERN_TRACE so that we have one place to modify
4404  * it if we decide to change what log level the ftrace dump
4405  * should be at.
4406  */
4407 #define KERN_TRACE              KERN_EMERG
4408
4409 static void
4410 trace_printk_seq(struct trace_seq *s)
4411 {
4412         /* Probably should print a warning here. */
4413         if (s->len >= TRACE_MAX_PRINT)
4414                 s->len = TRACE_MAX_PRINT;
4415
4416         /* should be zero terminated, but we are paranoid. */
4417         s->buffer[s->len] = 0;
4418
4419         printk(KERN_TRACE "%s", s->buffer);
4420
4421         trace_seq_init(s);
4422 }
4423
4424 static void
4425 __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
4426 {
4427         static arch_spinlock_t ftrace_dump_lock =
4428                 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
4429         /* use static because iter can be a bit big for the stack */
4430         static struct trace_iterator iter;
4431         unsigned int old_userobj;
4432         static int dump_ran;
4433         unsigned long flags;
4434         int cnt = 0, cpu;
4435
4436         /* only one dump */
4437         local_irq_save(flags);
4438         arch_spin_lock(&ftrace_dump_lock);
4439         if (dump_ran)
4440                 goto out;
4441
4442         dump_ran = 1;
4443
4444         tracing_off();
4445
4446         if (disable_tracing)
4447                 ftrace_kill();
4448
4449         for_each_tracing_cpu(cpu) {
4450                 atomic_inc(&global_trace.data[cpu]->disabled);
4451         }
4452
4453         old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
4454
4455         /* don't look at user memory in panic mode */
4456         trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
4457
4458         /* Simulate the iterator */
4459         iter.tr = &global_trace;
4460         iter.trace = current_trace;
4461
4462         switch (oops_dump_mode) {
4463         case DUMP_ALL:
4464                 iter.cpu_file = TRACE_PIPE_ALL_CPU;
4465                 break;
4466         case DUMP_ORIG:
4467                 iter.cpu_file = raw_smp_processor_id();
4468                 break;
4469         case DUMP_NONE:
4470                 goto out_enable;
4471         default:
4472                 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
4473                 iter.cpu_file = TRACE_PIPE_ALL_CPU;
4474         }
4475
4476         printk(KERN_TRACE "Dumping ftrace buffer:\n");
4477
4478         /*
4479          * We need to stop all tracing on all CPUs to read the
4480          * next buffer. This is a bit expensive, but is
4481          * not done often. We read everything we can, and then
4482          * release the locks again.
4483          */
4484
4485         while (!trace_empty(&iter)) {
4486
4487                 if (!cnt)
4488                         printk(KERN_TRACE "---------------------------------\n");
4489
4490                 cnt++;
4491
4492                 /* reset all but tr, trace, and overruns */
4493                 memset(&iter.seq, 0,
4494                        sizeof(struct trace_iterator) -
4495                        offsetof(struct trace_iterator, seq));
4496                 iter.iter_flags |= TRACE_FILE_LAT_FMT;
4497                 iter.pos = -1;
4498
4499                 if (find_next_entry_inc(&iter) != NULL) {
4500                         int ret;
4501
4502                         ret = print_trace_line(&iter);
4503                         if (ret != TRACE_TYPE_NO_CONSUME)
4504                                 trace_consume(&iter);
4505                 }
4506
4507                 trace_printk_seq(&iter.seq);
4508         }
4509
4510         if (!cnt)
4511                 printk(KERN_TRACE "   (ftrace buffer empty)\n");
4512         else
4513                 printk(KERN_TRACE "---------------------------------\n");
4514
4515  out_enable:
4516         /* Re-enable tracing if requested */
4517         if (!disable_tracing) {
4518                 trace_flags |= old_userobj;
4519
4520                 for_each_tracing_cpu(cpu) {
4521                         atomic_dec(&global_trace.data[cpu]->disabled);
4522                 }
4523                 tracing_on();
4524         }
4525
4526  out:
4527         arch_spin_unlock(&ftrace_dump_lock);
4528         local_irq_restore(flags);
4529 }
4530
4531 /* By default: disable tracing after the dump */
4532 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
4533 {
4534         __ftrace_dump(true, oops_dump_mode);
4535 }
4536
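/*
 * Early-boot setup: allocate the trace ring buffers, wire up the per-cpu
 * data, register the nop tracer as the default and install the panic/die
 * notifiers.  This runs as an early_initcall (see the bottom of the file).
 */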
4537 __init static int tracer_alloc_buffers(void)
4538 {
4539         int ring_buf_size;
4540         int i;
4541         int ret = -ENOMEM;
4542
4543         if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
4544                 goto out;
4545
4546         if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
4547                 goto out_free_buffer_mask;
4548
4549         /* To save memory, keep the ring buffer size to its minimum */
4550         if (ring_buffer_expanded)
4551                 ring_buf_size = trace_buf_size;
4552         else
4553                 ring_buf_size = 1;
4554
4555         cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
4556         cpumask_copy(tracing_cpumask, cpu_all_mask);
4557
4558         /* TODO: make the number of buffers hot-pluggable with CPUs */
4559         global_trace.buffer = ring_buffer_alloc(ring_buf_size,
4560                                                    TRACE_BUFFER_FLAGS);
4561         if (!global_trace.buffer) {
4562                 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
4563                 WARN_ON(1);
4564                 goto out_free_cpumask;
4565         }
4566         global_trace.entries = ring_buffer_size(global_trace.buffer);
4567
4568
4569 #ifdef CONFIG_TRACER_MAX_TRACE
4570         max_tr.buffer = ring_buffer_alloc(ring_buf_size,
4571                                              TRACE_BUFFER_FLAGS);
4572         if (!max_tr.buffer) {
4573                 printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
4574                 WARN_ON(1);
4575                 ring_buffer_free(global_trace.buffer);
4576                 goto out_free_cpumask;
4577         }
4578         max_tr.entries = ring_buffer_size(max_tr.buffer);
4579         WARN_ON(max_tr.entries != global_trace.entries);
4580 #endif
4581
4582         /* Set up the per-cpu data pointers for both trace arrays */
4583         for_each_tracing_cpu(i) {
4584                 global_trace.data[i] = &per_cpu(global_trace_cpu, i);
4585                 max_tr.data[i] = &per_cpu(max_tr_data, i);
4586         }
4587
4588         trace_init_cmdlines();
4589
4590         register_tracer(&nop_trace);
4591         current_trace = &nop_trace;
4592 #ifdef CONFIG_BOOT_TRACER
4593         register_tracer(&boot_tracer);
4594 #endif
4595         /* All seems OK, enable tracing */
4596         tracing_disabled = 0;
4597
4598         atomic_notifier_chain_register(&panic_notifier_list,
4599                                        &trace_panic_notifier);
4600
4601         register_die_notifier(&trace_die_notifier);
4602
4603         return 0;
4604
4605 out_free_cpumask:
4606         free_cpumask_var(tracing_cpumask);
4607 out_free_buffer_mask:
4608         free_cpumask_var(tracing_buffer_mask);
4609 out:
4610         return ret;
4611 }
4612
4613 __init static int clear_boot_tracer(void)
4614 {
4615         /*
4616          * The default bootup tracer name lives in an init section.
4617          * This function is called as a late initcall. If the boot
4618          * tracer was never found and registered, clear the pointer
4619          * so that a later registration does not access the memory
4620          * that is about to be freed.
4621          */
4622         if (!default_bootup_tracer)
4623                 return 0;
4624
4625         printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
4626                default_bootup_tracer);
4627         default_bootup_tracer = NULL;
4628
4629         return 0;
4630 }
4631
4632 early_initcall(tracer_alloc_buffers);
4633 fs_initcall(tracer_init_debugfs);
4634 late_initcall(clear_boot_tracer);