/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"
/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although some concurrent
 * insertions into the ring buffer, such as trace_printk, could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
        { }
};

static struct tracer_flags dummy_tracer_flags = {
        .val = 0,
        .opts = dummy_tracer_opt
};

static int dummy_set_flag(u32 old_flags, u32 bit, int set)
{
        return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is default off, but you can enable it with either specifying
 * "ftrace_dump_on_oops" in the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

static int tracing_set_tracer(const char *buf);

#define MAX_TRACER_SIZE         100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
        strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
        default_bootup_tracer = bootup_tracer_buf;
        /* We are using ftrace early, expand it */
        ring_buffer_expanded = true;
        return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
        if (*str++ != '=' || !*str) {
                ftrace_dump_on_oops = DUMP_ALL;
                return 1;
        }

        if (!strcmp("orig_cpu", str)) {
                ftrace_dump_on_oops = DUMP_ORIG;
                return 1;
        }

        return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
static int __init stop_trace_on_warning(char *str)
{
        __disable_trace_on_warning = 1;
        return 1;
}
__setup("traceoff_on_warning=", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
        allocate_snapshot = true;
        /* We also need the main ring buffer expanded */
        ring_buffer_expanded = true;
        return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
        strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
        trace_boot_options = trace_boot_options_buf;
        return 0;
}
__setup("trace_options=", set_trace_boot_options);


unsigned long long ns2usecs(cycle_t nsec)
{
        nsec += 500;
        do_div(nsec, 1000);
        return nsec;
}
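/*
 * Editor's note -- an illustrative example, not part of the original file:
 * ns2usecs() rounds to the nearest microsecond by adding half a microsecond
 * before dividing, e.g. ns2usecs(1499) == 1 ((1499 + 500) / 1000) while
 * ns2usecs(1500) == 2 ((1500 + 500) / 1000).
 */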
190
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200191/*
192 * The global_trace is the descriptor that holds the tracing
193 * buffers for the live tracing. For each CPU, it contains
194 * a link list of pages that will store trace entries. The
195 * page descriptor of the pages in the memory is used to hold
196 * the link list by linking the lru item in the page descriptor
197 * to each of the pages in the buffer per CPU.
198 *
199 * For each active CPU there is a data field that holds the
200 * pages for the buffer for that CPU. Each CPU has the same number
201 * of pages allocated for its buffer.
202 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200203static struct trace_array global_trace;
204
Steven Rostedtae63b312012-05-03 23:09:03 -0400205LIST_HEAD(ftrace_trace_arrays);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200206
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -0400207int trace_array_get(struct trace_array *this_tr)
208{
209 struct trace_array *tr;
210 int ret = -ENODEV;
211
212 mutex_lock(&trace_types_lock);
213 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
214 if (tr == this_tr) {
215 tr->ref++;
216 ret = 0;
217 break;
218 }
219 }
220 mutex_unlock(&trace_types_lock);
221
222 return ret;
223}
224
225static void __trace_array_put(struct trace_array *this_tr)
226{
227 WARN_ON(!this_tr->ref);
228 this_tr->ref--;
229}
230
231void trace_array_put(struct trace_array *this_tr)
232{
233 mutex_lock(&trace_types_lock);
234 __trace_array_put(this_tr);
235 mutex_unlock(&trace_types_lock);
236}
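/*
 * Editor's note -- an illustrative sketch, not part of the original file
 * (do_something_with() is a hypothetical helper): callers pair the
 * reference operations around any use of a trace_array:
 *
 *      if (trace_array_get(tr) == 0) {
 *              do_something_with(tr);
 *              trace_array_put(tr);
 *      }
 *
 * trace_array_get() returns -ENODEV if @tr is no longer on the
 * ftrace_trace_arrays list, so a stale pointer is never pinned.
 */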
int filter_check_discard(struct ftrace_event_file *file, void *rec,
                         struct ring_buffer *buffer,
                         struct ring_buffer_event *event)
{
        if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
            !filter_match_preds(file->filter, rec)) {
                ring_buffer_discard_commit(buffer, event);
                return 1;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(filter_check_discard);

int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
                              struct ring_buffer *buffer,
                              struct ring_buffer_event *event)
{
        if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
            !filter_match_preds(call->filter, rec)) {
                ring_buffer_discard_commit(buffer, event);
                return 1;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(call_filter_check_discard);

cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
        u64 ts;

        /* Early boot up does not have a buffer yet */
        if (!buf->buffer)
                return trace_clock_local();

        ts = ring_buffer_time_stamp(buf->buffer, cpu);
        ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

        return ts;
}

cycle_t ftrace_now(int cpu)
{
        return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
        /*
         * For quick access (irqsoff uses this in fast path), just
         * return the mirror variable of the state of the ring buffer.
         * It's a little racy, but we don't really care.
         */
        smp_rmb();
        return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT  1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long            trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer            *trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * The ring buffer serializes readers, but that is only low-level
 * protection. The validity of the events (returned by
 * ring_buffer_peek() etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the event producer.
 *   B) the page of the consumed events may become a page for
 *      splice_read, and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different cpu
 * ring buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
        if (cpu == RING_BUFFER_ALL_CPUS) {
                /* gain it for accessing the whole ring buffer. */
                down_write(&all_cpu_access_lock);
        } else {
                /* gain it for accessing a cpu ring buffer. */

                /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
                down_read(&all_cpu_access_lock);

                /* Secondly block other access to this @cpu ring buffer. */
                mutex_lock(&per_cpu(cpu_access_lock, cpu));
        }
}

static inline void trace_access_unlock(int cpu)
{
        if (cpu == RING_BUFFER_ALL_CPUS) {
                up_write(&all_cpu_access_lock);
        } else {
                mutex_unlock(&per_cpu(cpu_access_lock, cpu));
                up_read(&all_cpu_access_lock);
        }
}

static inline void trace_access_lock_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
        (void)cpu;
        mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
        (void)cpu;
        mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
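/*
 * Editor's note -- an illustrative sketch, not part of the original file
 * (consume_events() is a hypothetical helper): a consuming reader brackets
 * its accesses with the primitives above, which nest a per-cpu mutex
 * inside the read side of all_cpu_access_lock:
 *
 *      trace_access_lock(cpu);
 *      consume_events(cpu);
 *      trace_access_unlock(cpu);
 *
 * Passing RING_BUFFER_ALL_CPUS instead takes the rwsem for writing,
 * excluding every per-cpu reader at once.
 */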
408
Steven Rostedtee6bce52008-11-12 17:52:37 -0500409/* trace_flags holds trace_options default values */
Steven Rostedt12ef7d42008-11-12 17:52:38 -0500410unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
Steven Rostedta2a16d62009-03-24 23:17:58 -0400411 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
Steven Rostedt77271ce2011-11-17 09:34:33 -0500412 TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
Steven Rostedt (Red Hat)328df472013-03-14 12:10:40 -0400413 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;
Vaibhav Nagarnaike7e2ee82011-05-10 13:27:21 -0700414
Steven Rostedt (Red Hat)5280bce2013-07-02 19:59:57 -0400415static void tracer_tracing_on(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400416{
417 if (tr->trace_buffer.buffer)
418 ring_buffer_record_on(tr->trace_buffer.buffer);
419 /*
420 * This flag is looked at when buffers haven't been allocated
421 * yet, or by some tracers (like irqsoff), that just want to
422 * know if the ring buffer has been disabled, but it can handle
423 * races of where it gets disabled but we still do a record.
424 * As the check is in the fast path of the tracers, it is more
425 * important to be fast than accurate.
426 */
427 tr->buffer_disabled = 0;
428 /* Make the flag seen by readers */
429 smp_wmb();
430}
431
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200432/**
Steven Rostedt499e5472012-02-22 15:50:28 -0500433 * tracing_on - enable tracing buffers
434 *
435 * This function enables tracing buffers that may have been
436 * disabled with tracing_off.
437 */
438void tracing_on(void)
439{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400440 tracer_tracing_on(&global_trace);
Steven Rostedt499e5472012-02-22 15:50:28 -0500441}
442EXPORT_SYMBOL_GPL(tracing_on);
443
444/**
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500445 * __trace_puts - write a constant string into the trace buffer.
446 * @ip: The address of the caller
447 * @str: The constant string to write
448 * @size: The size of the string.
449 */
450int __trace_puts(unsigned long ip, const char *str, int size)
451{
452 struct ring_buffer_event *event;
453 struct ring_buffer *buffer;
454 struct print_entry *entry;
455 unsigned long irq_flags;
456 int alloc;
457
Steven Rostedt (Red Hat)3132e102014-01-23 12:27:59 -0500458 if (unlikely(tracing_selftest_running || tracing_disabled))
459 return 0;
460
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500461 alloc = sizeof(*entry) + size + 2; /* possible \n added */
462
463 local_save_flags(irq_flags);
464 buffer = global_trace.trace_buffer.buffer;
465 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
466 irq_flags, preempt_count());
467 if (!event)
468 return 0;
469
470 entry = ring_buffer_event_data(event);
471 entry->ip = ip;
472
473 memcpy(&entry->buf, str, size);
474
475 /* Add a newline if necessary */
476 if (entry->buf[size - 1] != '\n') {
477 entry->buf[size] = '\n';
478 entry->buf[size + 1] = '\0';
479 } else
480 entry->buf[size] = '\0';
481
482 __buffer_unlock_commit(buffer, event);
483
484 return size;
485}
486EXPORT_SYMBOL_GPL(__trace_puts);
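/*
 * Editor's note, not part of the original file: callers normally reach
 * this through the trace_puts() macro, which (as of this kernel
 * generation) picks __trace_bputs() for string literals and
 * __trace_puts() for dynamic strings at compile time.
 */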
/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:    The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct bputs_entry *entry;
        unsigned long irq_flags;
        int size = sizeof(struct bputs_entry);

        if (unlikely(tracing_selftest_running || tracing_disabled))
                return 0;

        local_save_flags(irq_flags);
        buffer = global_trace.trace_buffer.buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
                                          irq_flags, preempt_count());
        if (!event)
                return 0;

        entry = ring_buffer_event_data(event);
        entry->ip  = ip;
        entry->str = str;

        __buffer_unlock_commit(buffer, event);

        return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
        struct trace_array *tr = &global_trace;
        struct tracer *tracer = tr->current_trace;
        unsigned long flags;

        if (in_nmi()) {
                internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
                internal_trace_puts("*** snapshot is being ignored        ***\n");
                return;
        }

        if (!tr->allocated_snapshot) {
                internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
                internal_trace_puts("*** stopping trace here!   ***\n");
                tracing_off();
                return;
        }

        /* Note, snapshot can not be used when the tracer uses it */
        if (tracer->use_max_tr) {
                internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
                internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
                return;
        }

        local_irq_save(flags);
        update_max_tr(tr, current, smp_processor_id());
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
                                        struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
        int ret;

        if (!tr->allocated_snapshot) {

                /* allocate spare buffer */
                ret = resize_buffer_duplicate_size(&tr->max_buffer,
                                   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
                if (ret < 0)
                        return ret;

                tr->allocated_snapshot = true;
        }

        return 0;
}

void free_snapshot(struct trace_array *tr)
{
        /*
         * We don't free the ring buffer; instead, we resize it, because
         * the max_tr ring buffer has some state (e.g. ring->clock) and
         * we want to preserve it.
         */
        ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
        set_buffer_entries(&tr->max_buffer, 1);
        tracing_reset_online_cpus(&tr->max_buffer);
        tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
        struct trace_array *tr = &global_trace;
        int ret;

        ret = alloc_snapshot(tr);
        WARN_ON(ret < 0);

        return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
        int ret;

        ret = tracing_alloc_snapshot();
        if (ret < 0)
                return;

        tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
        WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
        WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
        return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
        /* Give warning */
        tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */
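/*
 * Editor's note -- an illustrative sketch, not part of the original file:
 * per the kernel-doc above, a typical user allocates the snapshot buffer
 * once from sleepable context and then snapshots from hot paths:
 *
 *      tracing_snapshot_alloc();       (sleepable: allocate + first snapshot)
 *      ...
 *      tracing_snapshot();             (later, from atomic, non-NMI context)
 *
 * or, from user space:
 *
 *      echo 1 > /sys/kernel/debug/tracing/snapshot
 */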
static void tracer_tracing_off(struct trace_array *tr)
{
        if (tr->trace_buffer.buffer)
                ring_buffer_record_off(tr->trace_buffer.buffer);
        /*
         * This flag is looked at when buffers haven't been allocated
         * yet, or by some tracers (like irqsoff), that just want to
         * know if the ring buffer has been disabled, but it can handle
         * races of where it gets disabled but we still do a record.
         * As the check is in the fast path of the tracers, it is more
         * important to be fast than accurate.
         */
        tr->buffer_disabled = 1;
        /* Make the flag seen by readers */
        smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
        tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
        if (__disable_trace_on_warning)
                tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
        if (tr->trace_buffer.buffer)
                return ring_buffer_record_is_on(tr->trace_buffer.buffer);
        return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
        return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
        unsigned long buf_size;

        if (!str)
                return 0;
        buf_size = memparse(str, &str);
        /* nr_entries can not be zero */
        if (buf_size == 0)
                return 0;
        trace_buf_size = buf_size;
        return 1;
}
__setup("trace_buf_size=", set_buf_size);
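/*
 * Editor's note, not part of the original file: because set_buf_size()
 * parses with memparse(), the usual size suffixes work on the command
 * line, e.g. trace_buf_size=64K or trace_buf_size=1M, with the result
 * rounded to page size as described above.
 */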
static int __init set_tracing_thresh(char *str)
{
        unsigned long threshold;
        int ret;

        if (!str)
                return 0;
        ret = kstrtoul(str, 0, &threshold);
        if (ret < 0)
                return 0;
        tracing_thresh = threshold * 1000;
        return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);
unsigned long nsecs_to_usecs(unsigned long nsecs)
{
        return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
        "print-parent",
        "sym-offset",
        "sym-addr",
        "verbose",
        "raw",
        "hex",
        "bin",
        "block",
        "stacktrace",
        "trace_printk",
        "ftrace_preempt",
        "branch",
        "annotate",
        "userstacktrace",
        "sym-userobj",
        "printk-msg-only",
        "context-info",
        "latency-format",
        "sleep-time",
        "graph-time",
        "record-cmd",
        "overwrite",
        "disable_on_free",
        "irq-info",
        "markers",
        "function-trace",
        NULL
};

static struct {
        u64 (*func)(void);
        const char *name;
        int in_ns;              /* is this clock in nanoseconds? */
} trace_clocks[] = {
        { trace_clock_local,    "local",        1 },
        { trace_clock_global,   "global",       1 },
        { trace_clock_counter,  "counter",      0 },
        { trace_clock_jiffies,  "uptime",       1 },
        { trace_clock,          "perf",         1 },
        ARCH_TRACE_CLOCKS
};

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
        memset(parser, 0, sizeof(*parser));

        parser->buffer = kmalloc(size, GFP_KERNEL);
        if (!parser->buffer)
                return 1;

        parser->size = size;
        return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
        kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
        size_t cnt, loff_t *ppos)
{
        char ch;
        size_t read = 0;
        ssize_t ret;

        if (!*ppos)
                trace_parser_clear(parser);

        ret = get_user(ch, ubuf++);
        if (ret)
                goto out;

        read++;
        cnt--;

        /*
         * The parser is not finished with the last write,
         * continue reading the user input without skipping spaces.
         */
        if (!parser->cont) {
                /* skip white space */
                while (cnt && isspace(ch)) {
                        ret = get_user(ch, ubuf++);
                        if (ret)
                                goto out;
                        read++;
                        cnt--;
                }

                /* only spaces were written */
                if (isspace(ch)) {
                        *ppos += read;
                        ret = read;
                        goto out;
                }

                parser->idx = 0;
        }

        /* read the non-space input */
        while (cnt && !isspace(ch)) {
                if (parser->idx < parser->size - 1)
                        parser->buffer[parser->idx++] = ch;
                else {
                        ret = -EINVAL;
                        goto out;
                }
                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out;
                read++;
                cnt--;
        }

        /* We either got finished input or we have to wait for another call. */
        if (isspace(ch)) {
                parser->buffer[parser->idx] = 0;
                parser->cont = false;
        } else if (parser->idx < parser->size - 1) {
                parser->cont = true;
                parser->buffer[parser->idx++] = ch;
        } else {
                ret = -EINVAL;
                goto out;
        }

        *ppos += read;
        ret = read;

out:
        return ret;
}
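/*
 * Editor's note -- an illustrative sketch, not part of the original file
 * (handle_token() is a hypothetical helper, and trace_parser_loaded() is
 * assumed from kernel/trace/trace.h): a debugfs write handler typically
 * runs the parser over the user buffer, one whitespace-separated token
 * per call:
 *
 *      struct trace_parser parser;
 *      ssize_t read;
 *
 *      if (trace_parser_get_init(&parser, PAGE_SIZE))
 *              return -ENOMEM;
 *      read = trace_get_user(&parser, ubuf, cnt, ppos);
 *      if (read >= 0 && trace_parser_loaded(&parser))
 *              handle_token(parser.buffer);
 *      trace_parser_put(&parser);
 */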
ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
        int len;
        int ret;

        if (!cnt)
                return 0;

        if (s->len <= s->readpos)
                return -EBUSY;

        len = s->len - s->readpos;
        if (cnt > len)
                cnt = len;
        ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
        if (ret == cnt)
                return -EFAULT;

        cnt -= ret;

        s->readpos += cnt;
        return cnt;
}

static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
        int len;

        if (s->len <= s->readpos)
                return -EBUSY;

        len = s->len - s->readpos;
        if (cnt > len)
                cnt = len;
        memcpy(buf, s->buffer + s->readpos, cnt);

        s->readpos += cnt;
        return cnt;
}

/*
 * ftrace_max_lock is used to protect the swapping of buffers
 * when taking a max snapshot. The buffers themselves are
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
 * This is defined as an arch_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 *
 * It is also used in other places outside the update_max_tr
 * so it needs to be defined outside of the
 * CONFIG_TRACER_MAX_TRACE.
 */
static arch_spinlock_t ftrace_max_lock =
        (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

unsigned long __read_mostly     tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
unsigned long __read_mostly     tracing_max_latency;

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct trace_buffer *trace_buf = &tr->trace_buffer;
        struct trace_buffer *max_buf = &tr->max_buffer;
        struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
        struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

        max_buf->cpu = cpu;
        max_buf->time_start = data->preempt_timestamp;

        max_data->saved_latency = tracing_max_latency;
        max_data->critical_start = data->critical_start;
        max_data->critical_end = data->critical_end;

        memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
        max_data->pid = tsk->pid;
        /*
         * If tsk == current, then use current_uid(), as that does not use
         * RCU. The irq tracer can be called out of RCU scope.
         */
        if (tsk == current)
                max_data->uid = current_uid();
        else
                max_data->uid = task_uid(tsk);

        max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
        max_data->policy = tsk->policy;
        max_data->rt_priority = tsk->rt_priority;

        /* record this task's comm */
        tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct ring_buffer *buf;

        if (tr->stop_count)
                return;

        WARN_ON_ONCE(!irqs_disabled());

        if (!tr->allocated_snapshot) {
                /* Only the nop tracer should hit this when disabling */
                WARN_ON_ONCE(tr->current_trace != &nop_trace);
                return;
        }

        arch_spin_lock(&ftrace_max_lock);

        buf = tr->trace_buffer.buffer;
        tr->trace_buffer.buffer = tr->max_buffer.buffer;
        tr->max_buffer.buffer = buf;

        __update_max_tr(tr, tsk, cpu);
        arch_spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        int ret;

        if (tr->stop_count)
                return;

        WARN_ON_ONCE(!irqs_disabled());
        if (!tr->allocated_snapshot) {
                /* Only the nop tracer should hit this when disabling */
                WARN_ON_ONCE(tr->current_trace != &nop_trace);
                return;
        }

        arch_spin_lock(&ftrace_max_lock);

        ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

        if (ret == -EBUSY) {
                /*
                 * We failed to swap the buffer due to a commit taking
                 * place on this CPU. We fail to record, but we reset
                 * the max trace buffer (no one writes directly to it)
                 * and flag that it failed.
                 */
                trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
                        "Failed to swap buffers due to commit in progress\n");
        }

        WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

        __update_max_tr(tr, tsk, cpu);
        arch_spin_unlock(&ftrace_max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static void default_wait_pipe(struct trace_iterator *iter)
{
        /* Iterators are static, they should be filled or empty */
        if (trace_buffer_iter(iter, iter->cpu_file))
                return;

        ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
        struct trace_array *tr = &global_trace;
        struct tracer *saved_tracer = tr->current_trace;
        int ret;

        if (!type->selftest || tracing_selftest_disabled)
                return 0;

        /*
         * Run a selftest on this tracer.
         * Here we reset the trace buffer, and set the current
         * tracer to be this tracer. The tracer can then run some
         * internal tracing to verify that everything is in order.
         * If we fail, we do not register this tracer.
         */
        tracing_reset_online_cpus(&tr->trace_buffer);

        tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
        if (type->use_max_tr) {
                /* If we expanded the buffers, make sure the max is expanded too */
                if (ring_buffer_expanded)
                        ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
                                           RING_BUFFER_ALL_CPUS);
                tr->allocated_snapshot = true;
        }
#endif

        /* the test is responsible for initializing and enabling */
        pr_info("Testing tracer %s: ", type->name);
        ret = type->selftest(type, tr);
        /* the test is responsible for resetting too */
        tr->current_trace = saved_tracer;
        if (ret) {
                printk(KERN_CONT "FAILED!\n");
                /* Add the warning after printing 'FAILED' */
                WARN_ON(1);
                return -1;
        }
        /* Only reset on passing, to avoid touching corrupted buffers */
        tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
        if (type->use_max_tr) {
                tr->allocated_snapshot = false;

                /* Shrink the max buffer again */
                if (ring_buffer_expanded)
                        ring_buffer_resize(tr->max_buffer.buffer, 1,
                                           RING_BUFFER_ALL_CPUS);
        }
#endif

        printk(KERN_CONT "PASSED\n");
        return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
        return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
        struct tracer *t;
        int ret = 0;

        if (!type->name) {
                pr_info("Tracer must have a name\n");
                return -1;
        }

        if (strlen(type->name) >= MAX_TRACER_SIZE) {
                pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
                return -1;
        }

        mutex_lock(&trace_types_lock);

        tracing_selftest_running = true;

        for (t = trace_types; t; t = t->next) {
                if (strcmp(type->name, t->name) == 0) {
                        /* already found */
                        pr_info("Tracer %s already registered\n",
                                type->name);
                        ret = -1;
                        goto out;
                }
        }

        if (!type->set_flag)
                type->set_flag = &dummy_set_flag;
        if (!type->flags)
                type->flags = &dummy_tracer_flags;
        else
                if (!type->flags->opts)
                        type->flags->opts = dummy_tracer_opt;
        if (!type->wait_pipe)
                type->wait_pipe = default_wait_pipe;

        ret = run_tracer_selftest(type);
        if (ret < 0)
                goto out;

        type->next = trace_types;
        trace_types = type;

 out:
        tracing_selftest_running = false;
        mutex_unlock(&trace_types_lock);

        if (ret || !default_bootup_tracer)
                goto out_unlock;

        if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
                goto out_unlock;

        printk(KERN_INFO "Starting tracer '%s'\n", type->name);
        /* Do we want this tracer to start on bootup? */
        tracing_set_tracer(type->name);
        default_bootup_tracer = NULL;
        /* disable other selftests, since this will break it. */
        tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
        printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
               type->name);
#endif

 out_unlock:
        return ret;
}
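/*
 * Editor's note -- an illustrative sketch, not part of the original file
 * (my_init/my_reset are hypothetical, and the field set is assumed from
 * struct tracer in trace.h): a minimal tracer needs little more than a
 * unique name; unset flag callbacks fall back to the dummy defaults above:
 *
 *      static struct tracer my_tracer __read_mostly = {
 *              .name   = "my_tracer",
 *              .init   = my_init,
 *              .reset  = my_reset,
 *      };
 *
 *      ...
 *      ret = register_tracer(&my_tracer);
 */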
1245
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001246void tracing_reset(struct trace_buffer *buf, int cpu)
Steven Rostedtf6339032009-09-04 12:35:16 -04001247{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001248 struct ring_buffer *buffer = buf->buffer;
Steven Rostedtf6339032009-09-04 12:35:16 -04001249
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001250 if (!buffer)
1251 return;
1252
Steven Rostedtf6339032009-09-04 12:35:16 -04001253 ring_buffer_record_disable(buffer);
1254
1255 /* Make sure all commits have finished */
1256 synchronize_sched();
Steven Rostedt68179682012-05-08 20:57:53 -04001257 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedtf6339032009-09-04 12:35:16 -04001258
1259 ring_buffer_record_enable(buffer);
1260}
1261
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001262void tracing_reset_online_cpus(struct trace_buffer *buf)
Pekka J Enberg213cc062008-12-19 12:08:39 +02001263{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001264 struct ring_buffer *buffer = buf->buffer;
Pekka J Enberg213cc062008-12-19 12:08:39 +02001265 int cpu;
1266
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001267 if (!buffer)
1268 return;
1269
Steven Rostedt621968c2009-09-04 12:02:35 -04001270 ring_buffer_record_disable(buffer);
1271
1272 /* Make sure all commits have finished */
1273 synchronize_sched();
1274
Alexander Z Lam94571582013-08-02 18:36:16 -07001275 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001276
1277 for_each_online_cpu(cpu)
Steven Rostedt68179682012-05-08 20:57:53 -04001278 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedt621968c2009-09-04 12:02:35 -04001279
1280 ring_buffer_record_enable(buffer);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001281}
1282
Steven Rostedt (Red Hat)09d80912013-07-23 22:21:59 -04001283/* Must have trace_types_lock held */
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001284void tracing_reset_all_online_cpus(void)
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001285{
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001286 struct trace_array *tr;
1287
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001288 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001289 tracing_reset_online_cpus(&tr->trace_buffer);
1290#ifdef CONFIG_TRACER_MAX_TRACE
1291 tracing_reset_online_cpus(&tr->max_buffer);
1292#endif
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001293 }
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001294}
1295
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001296#define SAVED_CMDLINES 128
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001297#define NO_CMDLINE_MAP UINT_MAX
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001298static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1299static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
1300static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
1301static int cmdline_idx;
Thomas Gleixneredc35bd2009-12-03 12:38:57 +01001302static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
Steven Rostedt25b0b442008-05-12 21:21:00 +02001303
Steven Rostedt25b0b442008-05-12 21:21:00 +02001304/* temporary disable recording */
Hannes Eder4fd27352009-02-10 19:44:12 +01001305static atomic_t trace_record_cmdline_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001306
1307static void trace_init_cmdlines(void)
1308{
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001309 memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
1310 memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001311 cmdline_idx = 0;
1312}
1313
Carsten Emdeb5130b12009-09-13 01:43:07 +02001314int is_tracing_stopped(void)
1315{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001316 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02001317}
1318
Steven Rostedt0f048702008-11-05 16:05:44 -05001319/**
1320 * tracing_start - quick start of the tracer
1321 *
1322 * If tracing is enabled but was stopped by tracing_stop,
1323 * this will start the tracer back up.
1324 */
1325void tracing_start(void)
1326{
1327 struct ring_buffer *buffer;
1328 unsigned long flags;
1329
1330 if (tracing_disabled)
1331 return;
1332
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001333 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1334 if (--global_trace.stop_count) {
1335 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05001336 /* Someone screwed up their debugging */
1337 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001338 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05001339 }
Steven Rostedt0f048702008-11-05 16:05:44 -05001340 goto out;
1341 }
1342
Steven Rostedta2f80712010-03-12 19:56:00 -05001343 /* Prevent the buffers from switching */
1344 arch_spin_lock(&ftrace_max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05001345
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001346 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001347 if (buffer)
1348 ring_buffer_record_enable(buffer);
1349
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001350#ifdef CONFIG_TRACER_MAX_TRACE
1351 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001352 if (buffer)
1353 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001354#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001355
Steven Rostedta2f80712010-03-12 19:56:00 -05001356 arch_spin_unlock(&ftrace_max_lock);
1357
Steven Rostedt0f048702008-11-05 16:05:44 -05001358 ftrace_start();
1359 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001360 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1361}
1362
1363static void tracing_start_tr(struct trace_array *tr)
1364{
1365 struct ring_buffer *buffer;
1366 unsigned long flags;
1367
1368 if (tracing_disabled)
1369 return;
1370
1371 /* If global, we need to also start the max tracer */
1372 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1373 return tracing_start();
1374
1375 raw_spin_lock_irqsave(&tr->start_lock, flags);
1376
1377 if (--tr->stop_count) {
1378 if (tr->stop_count < 0) {
1379 /* Someone screwed up their debugging */
1380 WARN_ON_ONCE(1);
1381 tr->stop_count = 0;
1382 }
1383 goto out;
1384 }
1385
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001386 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001387 if (buffer)
1388 ring_buffer_record_enable(buffer);
1389
1390 out:
1391 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001392}
1393
1394/**
1395 * tracing_stop - quick stop of the tracer
1396 *
1397 * Lightweight way to stop tracing. Use in conjunction with
1398 * tracing_start.
1399 */
1400void tracing_stop(void)
1401{
1402 struct ring_buffer *buffer;
1403 unsigned long flags;
1404
1405 ftrace_stop();
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001406 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1407 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05001408 goto out;
1409
Steven Rostedta2f80712010-03-12 19:56:00 -05001410 /* Prevent the buffers from switching */
1411 arch_spin_lock(&ftrace_max_lock);
1412
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001413 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001414 if (buffer)
1415 ring_buffer_record_disable(buffer);
1416
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001417#ifdef CONFIG_TRACER_MAX_TRACE
1418 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001419 if (buffer)
1420 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001421#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001422
Steven Rostedta2f80712010-03-12 19:56:00 -05001423 arch_spin_unlock(&ftrace_max_lock);
1424
Steven Rostedt0f048702008-11-05 16:05:44 -05001425 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001426 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1427}
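
/*
 * Illustrative pairing (not upstream code): stop_count acts as a
 * nesting counter, so stop/start pairs may nest safely:
 *
 *	tracing_stop();
 *	... examine the now-quiescent buffers ...
 *	tracing_start();
 */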
1428
1429static void tracing_stop_tr(struct trace_array *tr)
1430{
1431 struct ring_buffer *buffer;
1432 unsigned long flags;
1433
1434 /* If global, we need to also stop the max tracer */
1435 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1436 return tracing_stop();
1437
1438 raw_spin_lock_irqsave(&tr->start_lock, flags);
1439 if (tr->stop_count++)
1440 goto out;
1441
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001442 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001443 if (buffer)
1444 ring_buffer_record_disable(buffer);
1445
1446 out:
1447 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001448}
1449
Ingo Molnare309b412008-05-12 21:20:51 +02001450void trace_stop_cmdline_recording(void);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001451
Ingo Molnare309b412008-05-12 21:20:51 +02001452static void trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001453{
Carsten Emdea635cf02009-03-18 09:00:41 +01001454 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001455
1456 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1457 return;
1458
1459 /*
1460 * It's not the end of the world if we don't get
1461 * the lock, but we also don't want to spin
1462 * nor do we want to disable interrupts,
1463 * so if we miss here, then better luck next time.
1464 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001465 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001466 return;
1467
1468 idx = map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001469 if (idx == NO_CMDLINE_MAP) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001470 idx = (cmdline_idx + 1) % SAVED_CMDLINES;
1471
Carsten Emdea635cf02009-03-18 09:00:41 +01001472 /*
1473 * Check whether the cmdline buffer at idx has a pid
1474 * mapped. We are going to overwrite that entry so we
1475 * need to clear the map_pid_to_cmdline. Otherwise we
1476 * would read the new comm for the old pid.
1477 */
1478 pid = map_cmdline_to_pid[idx];
1479 if (pid != NO_CMDLINE_MAP)
1480 map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001481
Carsten Emdea635cf02009-03-18 09:00:41 +01001482 map_cmdline_to_pid[idx] = tsk->pid;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001483 map_pid_to_cmdline[tsk->pid] = idx;
1484
1485 cmdline_idx = idx;
1486 }
1487
1488 memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
1489
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001490 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001491}
1492
Steven Rostedt4ca53082009-03-16 19:20:15 -04001493void trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001494{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001495 unsigned map;
1496
Steven Rostedt4ca53082009-03-16 19:20:15 -04001497 if (!pid) {
1498 strcpy(comm, "<idle>");
1499 return;
1500 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001501
Steven Rostedt74bf4072010-01-25 15:11:53 -05001502 if (WARN_ON_ONCE(pid < 0)) {
1503 strcpy(comm, "<XXX>");
1504 return;
1505 }
1506
Steven Rostedt4ca53082009-03-16 19:20:15 -04001507 if (pid > PID_MAX_DEFAULT) {
1508 strcpy(comm, "<...>");
1509 return;
1510 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001511
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001512 preempt_disable();
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001513 arch_spin_lock(&trace_cmdline_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001514 map = map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001515 if (map != NO_CMDLINE_MAP)
1516 strcpy(comm, saved_cmdlines[map]);
1517 else
1518 strcpy(comm, "<...>");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001519
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001520 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001521 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001522}
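
/*
 * Usage sketch (illustrative; mirrors the output code): callers pass a
 * TASK_COMM_LEN sized buffer and always get a printable string back,
 * even for pids with no saved mapping:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	trace_find_cmdline(entry->pid, comm);
 *	trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
 */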
1523
Ingo Molnare309b412008-05-12 21:20:51 +02001524void tracing_record_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001525{
Steven Rostedt0fb96562012-05-11 14:25:30 -04001526 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001527 return;
1528
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001529 if (!__this_cpu_read(trace_cmdline_save))
1530 return;
1531
1532 __this_cpu_write(trace_cmdline_save, false);
1533
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001534 trace_save_cmdline(tsk);
1535}
1536
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03001537void
Steven Rostedt38697052008-10-01 13:14:09 -04001538tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1539 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001540{
1541 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001542
Steven Rostedt777e2082008-09-29 23:02:42 -04001543 entry->preempt_count = pc & 0xff;
1544 entry->pid = (tsk) ? tsk->pid : 0;
1545 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04001546#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04001547 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04001548#else
1549 TRACE_FLAG_IRQS_NOSUPPORT |
1550#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001551 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1552 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02001553 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1554 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001555}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02001556EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001557
Steven Rostedte77405a2009-09-02 14:17:06 -04001558struct ring_buffer_event *
1559trace_buffer_lock_reserve(struct ring_buffer *buffer,
1560 int type,
1561 unsigned long len,
1562 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001563{
1564 struct ring_buffer_event *event;
1565
Steven Rostedte77405a2009-09-02 14:17:06 -04001566 event = ring_buffer_lock_reserve(buffer, len);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001567 if (event != NULL) {
1568 struct trace_entry *ent = ring_buffer_event_data(event);
1569
1570 tracing_generic_entry_update(ent, flags, pc);
1571 ent->type = type;
1572 }
1573
1574 return event;
1575}
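
/*
 * Sketch of the usual reserve/fill/commit cycle built on this helper
 * (illustrative; trace_function() below is a real instance):
 *
 *	event = trace_buffer_lock_reserve(buffer, TRACE_FN,
 *					  sizeof(*entry), flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->ip = ip;
 *	__buffer_unlock_commit(buffer, event);
 */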
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001576
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001577void
1578__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1579{
1580 __this_cpu_write(trace_cmdline_save, true);
1581 ring_buffer_unlock_commit(buffer, event);
1582}
1583
Steven Rostedte77405a2009-09-02 14:17:06 -04001584static inline void
1585__trace_buffer_unlock_commit(struct ring_buffer *buffer,
1586 struct ring_buffer_event *event,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001587 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001588{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001589 __buffer_unlock_commit(buffer, event);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001590
Steven Rostedte77405a2009-09-02 14:17:06 -04001591 ftrace_trace_stack(buffer, flags, 6, pc);
1592 ftrace_trace_userstack(buffer, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001593}
1594
Steven Rostedte77405a2009-09-02 14:17:06 -04001595void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1596 struct ring_buffer_event *event,
1597 unsigned long flags, int pc)
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001598{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001599 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001600}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001601EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001602
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001603static struct ring_buffer *temp_buffer;
1604
Steven Rostedtef5580d2009-02-27 19:38:04 -05001605struct ring_buffer_event *
Steven Rostedtccb469a2012-08-02 10:32:10 -04001606trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1607 struct ftrace_event_file *ftrace_file,
1608 int type, unsigned long len,
1609 unsigned long flags, int pc)
1610{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001611 struct ring_buffer_event *entry;
1612
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001613 *current_rb = ftrace_file->tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001614 entry = trace_buffer_lock_reserve(*current_rb,
Steven Rostedtccb469a2012-08-02 10:32:10 -04001615 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001616 /*
1617 * If tracing is off, but we have triggers enabled
1618 * we still need to look at the event data. Use the temp_buffer
1619 * to store the trace event for the trigger to use. It's recursion
1620 * safe and will not be recorded anywhere.
1621 */
1622 if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
1623 *current_rb = temp_buffer;
1624 entry = trace_buffer_lock_reserve(*current_rb,
1625 type, len, flags, pc);
1626 }
1627 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04001628}
1629EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1630
1631struct ring_buffer_event *
Steven Rostedte77405a2009-09-02 14:17:06 -04001632trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1633 int type, unsigned long len,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001634 unsigned long flags, int pc)
1635{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001636 *current_rb = global_trace.trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04001637 return trace_buffer_lock_reserve(*current_rb,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001638 type, len, flags, pc);
1639}
Steven Rostedt94487d62009-05-05 19:22:53 -04001640EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001641
Steven Rostedte77405a2009-09-02 14:17:06 -04001642void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1643 struct ring_buffer_event *event,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001644 unsigned long flags, int pc)
1645{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001646 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001647}
Steven Rostedt94487d62009-05-05 19:22:53 -04001648EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001649
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001650void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1651 struct ring_buffer_event *event,
1652 unsigned long flags, int pc,
1653 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001654{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001655 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001656
1657 ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1658 ftrace_trace_userstack(buffer, flags, pc);
1659}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001660EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001661
Steven Rostedte77405a2009-09-02 14:17:06 -04001662void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1663 struct ring_buffer_event *event)
Steven Rostedt77d9f462009-04-02 01:16:59 -04001664{
Steven Rostedte77405a2009-09-02 14:17:06 -04001665 ring_buffer_discard_commit(buffer, event);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001666}
Steven Rostedt12acd472009-04-17 16:01:56 -04001667EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001668
Ingo Molnare309b412008-05-12 21:20:51 +02001669void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001670trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04001671 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1672 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001673{
Tom Zanussie1112b42009-03-31 00:48:49 -05001674 struct ftrace_event_call *call = &event_function;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001675 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001676 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001677 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001678
Steven Rostedtd7690412008-10-01 00:29:53 -04001679 /* If we are reading the ring buffer, don't trace */
Rusty Russelldd17c8f2009-10-29 22:34:15 +09001680 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
Steven Rostedtd7690412008-10-01 00:29:53 -04001681 return;
1682
Steven Rostedte77405a2009-09-02 14:17:06 -04001683 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001684 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001685 if (!event)
1686 return;
1687 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04001688 entry->ip = ip;
1689 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05001690
Tom Zanussif306cc82013-10-24 08:34:17 -05001691 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001692 __buffer_unlock_commit(buffer, event);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001693}
1694
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001695#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001696
1697#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1698struct ftrace_stack {
1699 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1700};
1701
1702static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1703static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1704
Steven Rostedte77405a2009-09-02 14:17:06 -04001705static void __ftrace_trace_stack(struct ring_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05001706 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001707 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02001708{
Tom Zanussie1112b42009-03-31 00:48:49 -05001709 struct ftrace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001710 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001711 struct stack_entry *entry;
Ingo Molnar86387f72008-05-12 21:20:51 +02001712 struct stack_trace trace;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001713 int use_stack;
1714 int size = FTRACE_STACK_ENTRIES;
Ingo Molnar86387f72008-05-12 21:20:51 +02001715
1716 trace.nr_entries = 0;
Ingo Molnar86387f72008-05-12 21:20:51 +02001717 trace.skip = skip;
Ingo Molnar86387f72008-05-12 21:20:51 +02001718
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001719 /*
1720 * Since events can happen in NMIs there's no safe way to
1721 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1722 * or NMI comes in, it will just have to use the default
1723 * FTRACE_STACK_ENTRIES.
1724 */
1725 preempt_disable_notrace();
1726
Shan Wei82146522012-11-19 13:21:01 +08001727 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001728 /*
1729 * We don't need any atomic variables, just a barrier.
1730 * If an interrupt comes in, we don't care, because it would
1731 * have exited and put the counter back to what we want.
1732 * We just need a barrier to keep gcc from moving things
1733 * around.
1734 */
1735 barrier();
1736 if (use_stack == 1) {
1737 trace.entries = &__get_cpu_var(ftrace_stack).calls[0];
1738 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1739
1740 if (regs)
1741 save_stack_trace_regs(regs, &trace);
1742 else
1743 save_stack_trace(&trace);
1744
1745 if (trace.nr_entries > size)
1746 size = trace.nr_entries;
1747 } else
1748 /* From now on, use_stack is a boolean */
1749 use_stack = 0;
1750
1751 size *= sizeof(unsigned long);
1752
1753 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1754 sizeof(*entry) + size, flags, pc);
1755 if (!event)
1756 goto out;
1757 entry = ring_buffer_event_data(event);
1758
1759 memset(&entry->caller, 0, size);
1760
1761 if (use_stack)
1762 memcpy(&entry->caller, trace.entries,
1763 trace.nr_entries * sizeof(unsigned long));
1764 else {
1765 trace.max_entries = FTRACE_STACK_ENTRIES;
1766 trace.entries = entry->caller;
1767 if (regs)
1768 save_stack_trace_regs(regs, &trace);
1769 else
1770 save_stack_trace(&trace);
1771 }
1772
1773 entry->size = trace.nr_entries;
1774
Tom Zanussif306cc82013-10-24 08:34:17 -05001775 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001776 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001777
1778 out:
1779 /* Again, don't let gcc optimize things here */
1780 barrier();
Shan Wei82146522012-11-19 13:21:01 +08001781 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001782 preempt_enable_notrace();
1783
Ingo Molnarf0a920d2008-05-12 21:20:47 +02001784}
1785
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001786void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1787 int skip, int pc, struct pt_regs *regs)
1788{
1789 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1790 return;
1791
1792 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1793}
1794
Steven Rostedte77405a2009-09-02 14:17:06 -04001795void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1796 int skip, int pc)
Steven Rostedt53614992009-01-15 19:12:40 -05001797{
1798 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1799 return;
1800
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001801 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
Steven Rostedt53614992009-01-15 19:12:40 -05001802}
1803
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001804void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1805 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04001806{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001807 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
Steven Rostedt38697052008-10-01 13:14:09 -04001808}
1809
Steven Rostedt03889382009-12-11 09:48:22 -05001810/**
1811 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001812 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05001813 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001814void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05001815{
1816 unsigned long flags;
1817
1818 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05001819 return;
Steven Rostedt03889382009-12-11 09:48:22 -05001820
1821 local_save_flags(flags);
1822
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001823 /*
1824 * Skip 3 more; that seems to get us to the caller of
1825 * this function.
1826 */
1827 skip += 3;
1828 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1829 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05001830}
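
/*
 * Debugging sketch (illustrative): dropping this into a suspect code
 * path records a backtrace in the ring buffer without stopping the
 * machine:
 *
 *	trace_dump_stack(0);
 */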
1831
Steven Rostedt91e86e52010-11-10 12:56:12 +01001832static DEFINE_PER_CPU(int, user_stack_count);
1833
Steven Rostedte77405a2009-09-02 14:17:06 -04001834void
1835ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02001836{
Tom Zanussie1112b42009-03-31 00:48:49 -05001837 struct ftrace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02001838 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02001839 struct userstack_entry *entry;
1840 struct stack_trace trace;
Török Edwin02b67512008-11-22 13:28:47 +02001841
1842 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1843 return;
1844
Steven Rostedtb6345872010-03-12 20:03:30 -05001845 /*
1846 * NMIs can not handle page faults, even with fix ups.
1847 * The save user stack can (and often does) fault.
1848 */
1849 if (unlikely(in_nmi()))
1850 return;
1851
Steven Rostedt91e86e52010-11-10 12:56:12 +01001852 /*
1853 * prevent recursion, since the user stack tracing may
1854 * trigger other kernel events.
1855 */
1856 preempt_disable();
1857 if (__this_cpu_read(user_stack_count))
1858 goto out;
1859
1860 __this_cpu_inc(user_stack_count);
1861
Steven Rostedte77405a2009-09-02 14:17:06 -04001862 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001863 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02001864 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08001865 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02001866 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02001867
Steven Rostedt48659d32009-09-11 11:36:23 -04001868 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02001869 memset(&entry->caller, 0, sizeof(entry->caller));
1870
1871 trace.nr_entries = 0;
1872 trace.max_entries = FTRACE_STACK_ENTRIES;
1873 trace.skip = 0;
1874 trace.entries = entry->caller;
1875
1876 save_stack_trace_user(&trace);
Tom Zanussif306cc82013-10-24 08:34:17 -05001877 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001878 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001879
Li Zefan1dbd1952010-12-09 15:47:56 +08001880 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01001881 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001882 out:
1883 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02001884}
1885
Hannes Eder4fd27352009-02-10 19:44:12 +01001886#ifdef UNUSED
1887static void __trace_userstack(struct trace_array *tr, unsigned long flags)
Török Edwin02b67512008-11-22 13:28:47 +02001888{
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001889 ftrace_trace_userstack(tr, flags, preempt_count());
Török Edwin02b67512008-11-22 13:28:47 +02001890}
Hannes Eder4fd27352009-02-10 19:44:12 +01001891#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02001892
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001893#endif /* CONFIG_STACKTRACE */
1894
Steven Rostedt07d777f2011-09-22 14:01:55 -04001895/* created for use with alloc_percpu */
1896struct trace_buffer_struct {
1897 char buffer[TRACE_BUF_SIZE];
1898};
1899
1900static struct trace_buffer_struct *trace_percpu_buffer;
1901static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1902static struct trace_buffer_struct *trace_percpu_irq_buffer;
1903static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1904
1905/*
1906 * The buffer used is dependent on the context. There is a per cpu
1907 * buffer for normal context, softirq context, hard irq context and
1908 * for NMI context. This allows for lockless recording.
1909 *
1910 * Note: if the buffers failed to be allocated, then this returns NULL.
1911 */
1912static char *get_trace_buf(void)
1913{
1914 struct trace_buffer_struct *percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04001915
1916 /*
1917 * If we have allocated per cpu buffers, then we do not
1918 * need to do any locking.
1919 */
1920 if (in_nmi())
1921 percpu_buffer = trace_percpu_nmi_buffer;
1922 else if (in_irq())
1923 percpu_buffer = trace_percpu_irq_buffer;
1924 else if (in_softirq())
1925 percpu_buffer = trace_percpu_sirq_buffer;
1926 else
1927 percpu_buffer = trace_percpu_buffer;
1928
1929 if (!percpu_buffer)
1930 return NULL;
1931
Shan Weid8a03492012-11-13 09:53:04 +08001932 return this_cpu_ptr(&percpu_buffer->buffer[0]);
Steven Rostedt07d777f2011-09-22 14:01:55 -04001933}
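
/*
 * Illustrative mapping (not code): a trace_printk() in task context
 * that is interrupted by an irq doing its own trace_printk() cannot
 * clobber the task's buffer, because each context resolves to its own
 * per cpu buffer:
 *
 *	task/normal  -> trace_percpu_buffer
 *	softirq      -> trace_percpu_sirq_buffer
 *	hardirq      -> trace_percpu_irq_buffer
 *	NMI          -> trace_percpu_nmi_buffer
 */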
1934
1935static int alloc_percpu_trace_buffer(void)
1936{
1937 struct trace_buffer_struct *buffers;
1938 struct trace_buffer_struct *sirq_buffers;
1939 struct trace_buffer_struct *irq_buffers;
1940 struct trace_buffer_struct *nmi_buffers;
1941
1942 buffers = alloc_percpu(struct trace_buffer_struct);
1943 if (!buffers)
1944 goto err_warn;
1945
1946 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1947 if (!sirq_buffers)
1948 goto err_sirq;
1949
1950 irq_buffers = alloc_percpu(struct trace_buffer_struct);
1951 if (!irq_buffers)
1952 goto err_irq;
1953
1954 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
1955 if (!nmi_buffers)
1956 goto err_nmi;
1957
1958 trace_percpu_buffer = buffers;
1959 trace_percpu_sirq_buffer = sirq_buffers;
1960 trace_percpu_irq_buffer = irq_buffers;
1961 trace_percpu_nmi_buffer = nmi_buffers;
1962
1963 return 0;
1964
1965 err_nmi:
1966 free_percpu(irq_buffers);
1967 err_irq:
1968 free_percpu(sirq_buffers);
1969 err_sirq:
1970 free_percpu(buffers);
1971 err_warn:
1972 WARN(1, "Could not allocate percpu trace_printk buffer");
1973 return -ENOMEM;
1974}
1975
Steven Rostedt81698832012-10-11 10:15:05 -04001976static int buffers_allocated;
1977
Steven Rostedt07d777f2011-09-22 14:01:55 -04001978void trace_printk_init_buffers(void)
1979{
Steven Rostedt07d777f2011-09-22 14:01:55 -04001980 if (buffers_allocated)
1981 return;
1982
1983 if (alloc_percpu_trace_buffer())
1984 return;
1985
1986 pr_info("ftrace: Allocated trace_printk buffers\n");
1987
Steven Rostedtb382ede62012-10-10 21:44:34 -04001988 /* Expand the buffers to set size */
1989 tracing_update_buffers();
1990
Steven Rostedt07d777f2011-09-22 14:01:55 -04001991 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04001992
1993 /*
1994 * trace_printk_init_buffers() can be called by modules.
1995 * If that happens, then we need to start cmdline recording
1996 * directly here. If the global_trace.buffer is already
1997 * allocated here, then this was called by module code.
1998 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001999 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04002000 tracing_start_cmdline_record();
2001}
2002
2003void trace_printk_start_comm(void)
2004{
2005 /* Start tracing comms if trace printk is set */
2006 if (!buffers_allocated)
2007 return;
2008 tracing_start_cmdline_record();
2009}
2010
2011static void trace_printk_start_stop_comm(int enabled)
2012{
2013 if (!buffers_allocated)
2014 return;
2015
2016 if (enabled)
2017 tracing_start_cmdline_record();
2018 else
2019 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002020}
2021
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002022/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002023 * trace_vbprintk - write binary msg to tracing buffer
 * @ip: The address of the caller
 * @fmt: The format string
 * @args: Arguments for @fmt
2025 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04002026int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002027{
Tom Zanussie1112b42009-03-31 00:48:49 -05002028 struct ftrace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002029 struct ring_buffer_event *event;
Steven Rostedte77405a2009-09-02 14:17:06 -04002030 struct ring_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002031 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002032 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002033 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002034 char *tbuffer;
2035 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002036
2037 if (unlikely(tracing_selftest_running || tracing_disabled))
2038 return 0;
2039
2040 /* Don't pollute graph traces with trace_vprintk internals */
2041 pause_graph_tracing();
2042
2043 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04002044 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002045
Steven Rostedt07d777f2011-09-22 14:01:55 -04002046 tbuffer = get_trace_buf();
2047 if (!tbuffer) {
2048 len = 0;
2049 goto out;
2050 }
2051
2052 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2053
2054 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002055 goto out;
2056
Steven Rostedt07d777f2011-09-22 14:01:55 -04002057 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002058 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002059 buffer = tr->trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04002060 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2061 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002062 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002063 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002064 entry = ring_buffer_event_data(event);
2065 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002066 entry->fmt = fmt;
2067
Steven Rostedt07d777f2011-09-22 14:01:55 -04002068 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05002069 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002070 __buffer_unlock_commit(buffer, event);
Steven Rostedtd9313692010-01-06 17:27:11 -05002071 ftrace_trace_stack(buffer, flags, 6, pc);
2072 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002073
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002074out:
Steven Rostedt5168ae52010-06-03 09:36:50 -04002075 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002076 unpause_graph_tracing();
2077
2078 return len;
2079}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002080EXPORT_SYMBOL_GPL(trace_vbprintk);
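
/*
 * Illustrative caller: trace_vbprintk() is normally reached through the
 * trace_printk() macro with a constant format, e.g.
 *
 *	trace_printk("read %d bytes from cpu %d\n", len, cpu);
 *
 * which records only the binary arguments plus a pointer to the format.
 */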
2081
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002082static int
2083__trace_array_vprintk(struct ring_buffer *buffer,
2084 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002085{
Tom Zanussie1112b42009-03-31 00:48:49 -05002086 struct ftrace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002087 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002088 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002089 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002090 unsigned long flags;
2091 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002092
2093 if (tracing_disabled || tracing_selftest_running)
2094 return 0;
2095
Steven Rostedt07d777f2011-09-22 14:01:55 -04002096 /* Don't pollute graph traces with trace_vprintk internals */
2097 pause_graph_tracing();
2098
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002099 pc = preempt_count();
2100 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002101
Steven Rostedt07d777f2011-09-22 14:01:55 -04002102
2103 tbuffer = get_trace_buf();
2104 if (!tbuffer) {
2105 len = 0;
2106 goto out;
2107 }
2108
2109 len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2110 if (len > TRACE_BUF_SIZE)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002111 goto out;
2112
Steven Rostedt07d777f2011-09-22 14:01:55 -04002113 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002114 size = sizeof(*entry) + len + 1;
Steven Rostedte77405a2009-09-02 14:17:06 -04002115 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
Steven Rostedt07d777f2011-09-22 14:01:55 -04002116 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002117 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002118 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002119 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002120 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002121
Steven Rostedt07d777f2011-09-22 14:01:55 -04002122 memcpy(&entry->buf, tbuffer, len);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002123 entry->buf[len] = '\0';
Tom Zanussif306cc82013-10-24 08:34:17 -05002124 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002125 __buffer_unlock_commit(buffer, event);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002126 ftrace_trace_stack(buffer, flags, 6, pc);
Steven Rostedtd9313692010-01-06 17:27:11 -05002127 }
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002128 out:
2129 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002130 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002131
2132 return len;
2133}
Steven Rostedt659372d2009-09-03 19:11:07 -04002134
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002135int trace_array_vprintk(struct trace_array *tr,
2136 unsigned long ip, const char *fmt, va_list args)
2137{
2138 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2139}
2140
2141int trace_array_printk(struct trace_array *tr,
2142 unsigned long ip, const char *fmt, ...)
2143{
2144 int ret;
2145 va_list ap;
2146
2147 if (!(trace_flags & TRACE_ITER_PRINTK))
2148 return 0;
2149
2150 va_start(ap, fmt);
2151 ret = trace_array_vprintk(tr, ip, fmt, ap);
2152 va_end(ap);
2153 return ret;
2154}
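
/*
 * Usage sketch (illustrative): instance-aware printk into a specific
 * trace_array, passing the caller's address for the ip field:
 *
 *	trace_array_printk(tr, _THIS_IP_, "resetting cpu %d\n", cpu);
 */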
2155
2156int trace_array_printk_buf(struct ring_buffer *buffer,
2157 unsigned long ip, const char *fmt, ...)
2158{
2159 int ret;
2160 va_list ap;
2161
2162 if (!(trace_flags & TRACE_ITER_PRINTK))
2163 return 0;
2164
2165 va_start(ap, fmt);
2166 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2167 va_end(ap);
2168 return ret;
2169}
2170
Steven Rostedt659372d2009-09-03 19:11:07 -04002171int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2172{
Steven Rostedta813a152009-10-09 01:41:35 -04002173 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04002174}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002175EXPORT_SYMBOL_GPL(trace_vprintk);
2176
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002177static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04002178{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002179 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2180
Steven Rostedt5a90f572008-09-03 17:42:51 -04002181 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002182 if (buf_iter)
2183 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04002184}
2185
Ingo Molnare309b412008-05-12 21:20:51 +02002186static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002187peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2188 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002189{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002190 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002191 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002192
Steven Rostedtd7690412008-10-01 00:29:53 -04002193 if (buf_iter)
2194 event = ring_buffer_iter_peek(buf_iter, ts);
2195 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002196 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002197 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04002198
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002199 if (event) {
2200 iter->ent_size = ring_buffer_event_length(event);
2201 return ring_buffer_event_data(event);
2202 }
2203 iter->ent_size = 0;
2204 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002205}
Steven Rostedtd7690412008-10-01 00:29:53 -04002206
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002207static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002208__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2209 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002210{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002211 struct ring_buffer *buffer = iter->trace_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002212 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08002213 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002214 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002215 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002216 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002217 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002218 int cpu;
2219
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002220 /*
2221 * If we are in a per_cpu trace file, don't bother by iterating over
2222 * all cpu and peek directly.
2223 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002224 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002225 if (ring_buffer_empty_cpu(buffer, cpu_file))
2226 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002227 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002228 if (ent_cpu)
2229 *ent_cpu = cpu_file;
2230
2231 return ent;
2232 }
2233
Steven Rostedtab464282008-05-12 21:21:00 +02002234 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002235
2236 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002237 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002238
Steven Rostedtbc21b472010-03-31 19:49:26 -04002239 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002240
Ingo Molnarcdd31cd2008-05-12 21:20:46 +02002241 /*
2242 * Pick the entry with the smallest timestamp:
2243 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002244 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002245 next = ent;
2246 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002247 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002248 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002249 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002250 }
2251 }
2252
Steven Rostedt12b5da32012-03-27 10:43:28 -04002253 iter->ent_size = next_size;
2254
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002255 if (ent_cpu)
2256 *ent_cpu = next_cpu;
2257
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002258 if (ent_ts)
2259 *ent_ts = next_ts;
2260
Steven Rostedtbc21b472010-03-31 19:49:26 -04002261 if (missing_events)
2262 *missing_events = next_lost;
2263
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002264 return next;
2265}
2266
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002267/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002268struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2269 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002270{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002271 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002272}
Ingo Molnar8c523a92008-05-12 21:20:46 +02002273
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002274/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05002275void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002276{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002277 iter->ent = __find_next_entry(iter, &iter->cpu,
2278 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02002279
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002280 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002281 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002282
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002283 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02002284}
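
/*
 * Typical consumer loop (illustrative; the pipe readers and the
 * seq_file iterator below follow this pattern):
 *
 *	while (trace_find_next_entry_inc(iter))
 *		print_trace_line(iter);
 */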
2285
Ingo Molnare309b412008-05-12 21:20:51 +02002286static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002287{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002288 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002289 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002290}
2291
Ingo Molnare309b412008-05-12 21:20:51 +02002292static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002293{
2294 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002295 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002296 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002297
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002298 WARN_ON_ONCE(iter->leftover);
2299
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002300 (*pos)++;
2301
2302 /* can't go backwards */
2303 if (iter->idx > i)
2304 return NULL;
2305
2306 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05002307 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002308 else
2309 ent = iter;
2310
2311 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05002312 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002313
2314 iter->pos = *pos;
2315
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002316 return ent;
2317}
2318
Jason Wessel955b61e2010-08-05 09:22:23 -05002319void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002320{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002321 struct ring_buffer_event *event;
2322 struct ring_buffer_iter *buf_iter;
2323 unsigned long entries = 0;
2324 u64 ts;
2325
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002326 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002327
Steven Rostedt6d158a82012-06-27 20:46:14 -04002328 buf_iter = trace_buffer_iter(iter, cpu);
2329 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002330 return;
2331
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002332 ring_buffer_iter_reset(buf_iter);
2333
2334 /*
2335 * With the max latency tracers, a reset may never have taken
2336 * place on a cpu. This is evident from the timestamp being
2337 * before the start of the buffer.
2338 */
2339 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002340 if (ts >= iter->trace_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002341 break;
2342 entries++;
2343 ring_buffer_read(buf_iter, NULL);
2344 }
2345
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002346 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002347}
2348
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002349/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002350 * The current tracer is copied to avoid global locking
2351 * all around.
2352 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002353static void *s_start(struct seq_file *m, loff_t *pos)
2354{
2355 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002356 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002357 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002358 void *p = NULL;
2359 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002360 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002361
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09002362 /*
2363 * copy the tracer to avoid using a global lock all around.
2364 * iter->trace is a copy of current_trace, the pointer to the
2365 * name may be used instead of a strcmp(), as iter->trace->name
2366 * will point to the same string as current_trace->name.
2367 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002368 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002369 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2370 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002371 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002372
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002373#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002374 if (iter->snapshot && iter->trace->use_max_tr)
2375 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002376#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002377
2378 if (!iter->snapshot)
2379 atomic_inc(&trace_record_cmdline_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002380
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002381 if (*pos != iter->pos) {
2382 iter->ent = NULL;
2383 iter->cpu = 0;
2384 iter->idx = -1;
2385
Steven Rostedtae3b5092013-01-23 15:22:59 -05002386 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002387 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002388 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002389 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002390 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002391
Lai Jiangshanac91d852010-03-02 17:54:50 +08002392 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002393 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2394 ;
2395
2396 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002397 /*
2398 * If we overflowed the seq_file before, then we want
2399 * to just reuse the trace_seq buffer again.
2400 */
2401 if (iter->leftover)
2402 p = iter;
2403 else {
2404 l = *pos - 1;
2405 p = s_next(m, p, &l);
2406 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002407 }
2408
Lai Jiangshan4f535962009-05-18 19:35:34 +08002409 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002410 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002411 return p;
2412}
2413
2414static void s_stop(struct seq_file *m, void *p)
2415{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002416 struct trace_iterator *iter = m->private;
2417
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002418#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002419 if (iter->snapshot && iter->trace->use_max_tr)
2420 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002421#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002422
2423 if (!iter->snapshot)
2424 atomic_dec(&trace_record_cmdline_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002425
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002426 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08002427 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002428}
2429
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002430static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002431get_total_entries(struct trace_buffer *buf,
2432 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002433{
2434 unsigned long count;
2435 int cpu;
2436
2437 *total = 0;
2438 *entries = 0;
2439
2440 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002441 count = ring_buffer_entries_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002442 /*
2443 * If this buffer has skipped entries, then we hold all
2444 * entries for the trace and we need to ignore the
2445 * ones before the time stamp.
2446 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002447 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2448 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002449 /* total is the same as the entries */
2450 *total += count;
2451 } else
2452 *total += count +
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002453 ring_buffer_overrun_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002454 *entries += count;
2455 }
2456}
2457
Ingo Molnare309b412008-05-12 21:20:51 +02002458static void print_lat_help_header(struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002459{
Michael Ellermana6168352008-08-20 16:36:11 -07002460 seq_puts(m, "# _------=> CPU# \n");
2461 seq_puts(m, "# / _-----=> irqs-off \n");
2462 seq_puts(m, "# | / _----=> need-resched \n");
2463 seq_puts(m, "# || / _---=> hardirq/softirq \n");
2464 seq_puts(m, "# ||| / _--=> preempt-depth \n");
Steven Rostedte6e1e252011-03-09 10:41:56 -05002465 seq_puts(m, "# |||| / delay \n");
2466 seq_puts(m, "# cmd pid ||||| time | caller \n");
2467 seq_puts(m, "# \\ / ||||| \\ | / \n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002468}
2469
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002470static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002471{
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002472 unsigned long total;
2473 unsigned long entries;
2474
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002475 get_total_entries(buf, &total, &entries);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002476 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2477 entries, total, num_online_cpus());
2478 seq_puts(m, "#\n");
2479}
2480
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002481static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002482{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002483 print_event_info(buf, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002484 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
Michael Ellermana6168352008-08-20 16:36:11 -07002485 seq_puts(m, "# | | | | |\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002486}
2487
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002488static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt77271ce2011-11-17 09:34:33 -05002489{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002490 print_event_info(buf, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002491 seq_puts(m, "# _-----=> irqs-off\n");
2492 seq_puts(m, "# / _----=> need-resched\n");
2493 seq_puts(m, "# | / _---=> hardirq/softirq\n");
2494 seq_puts(m, "# || / _--=> preempt-depth\n");
2495 seq_puts(m, "# ||| / delay\n");
2496 seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n");
2497 seq_puts(m, "# | | | |||| | |\n");
2498}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002499
Jiri Olsa62b915f2010-04-02 19:01:22 +02002500void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002501print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2502{
2503 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002504 struct trace_buffer *buf = iter->trace_buffer;
2505 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002506 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002507 unsigned long entries;
2508 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002509 const char *name = "preemption";
2510
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05002511 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002512
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002513 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002514
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002515 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002516 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002517 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002518 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002519 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002520 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02002521 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002522 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02002523 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002524 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002525#if defined(CONFIG_PREEMPT_NONE)
2526 "server",
2527#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2528 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04002529#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002530 "preempt",
2531#else
2532 "unknown",
2533#endif
2534 /* These are reserved for later use */
2535 0, 0, 0, 0);
2536#ifdef CONFIG_SMP
2537 seq_printf(m, " #P:%d)\n", num_online_cpus());
2538#else
2539 seq_puts(m, ")\n");
2540#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002541 seq_puts(m, "# -----------------\n");
2542 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002543 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07002544 data->comm, data->pid,
2545 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002546 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002547 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002548
2549 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002550 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002551 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2552 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002553 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002554 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2555 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04002556 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002557 }
2558
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002559 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002560}
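/*
 * Editor's sketch: with illustrative (made-up) values, the banner built
 * above looks roughly like:
 *
 *	# irqsoff latency trace v1.1.5 on 3.14.0
 *	# -----------------------------------------------------------------
 *	# latency: 259 us, #4/4, CPU#2 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:4)
 *	#    -----------------
 *	#    | task: ps-6143 (uid:0 nice:0 policy:0 rt_prio:0)
 *	#    -----------------
 *	#  => started at: __lock_task_sighand
 *	#  => ended at:   _raw_spin_unlock_irqrestore
 *	#
 */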
2561
Steven Rostedta3097202008-11-07 22:36:02 -05002562static void test_cpu_buff_start(struct trace_iterator *iter)
2563{
2564 struct trace_seq *s = &iter->seq;
2565
Steven Rostedt12ef7d42008-11-12 17:52:38 -05002566 if (!(trace_flags & TRACE_ITER_ANNOTATE))
2567 return;
2568
2569 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2570 return;
2571
Rusty Russell44623442009-01-01 10:12:23 +10302572 if (cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05002573 return;
2574
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002575 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002576 return;
2577
Rusty Russell44623442009-01-01 10:12:23 +10302578 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02002579
2580 /* Don't print started cpu buffer for the first entry of the trace */
2581 if (iter->idx > 1)
2582		trace_seq_printf(s, "##### CPU %u buffer started #####\n",
2583 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05002584}
2585
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002586static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002587{
Steven Rostedt214023c2008-05-12 21:20:46 +02002588 struct trace_seq *s = &iter->seq;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002589 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002590 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002591 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002592
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002593 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002594
Steven Rostedta3097202008-11-07 22:36:02 -05002595 test_cpu_buff_start(iter);
2596
Steven Rostedtf633cef2008-12-23 23:24:13 -05002597 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002598
2599 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt27d48be2009-03-04 21:57:29 -05002600 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2601 if (!trace_print_lat_context(iter))
2602 goto partial;
2603 } else {
2604 if (!trace_print_context(iter))
2605 goto partial;
2606 }
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002607 }
2608
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002609 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04002610 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002611
2612 if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
2613 goto partial;
Steven Rostedt7104f302008-10-01 10:52:51 -04002614
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002615 return TRACE_TYPE_HANDLED;
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002616partial:
2617 return TRACE_TYPE_PARTIAL_LINE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002618}
2619
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002620static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002621{
2622 struct trace_seq *s = &iter->seq;
2623 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002624 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002625
2626 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002627
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002628 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002629 if (!trace_seq_printf(s, "%d %d %llu ",
2630 entry->pid, iter->cpu, iter->ts))
2631 goto partial;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002632 }
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002633
Steven Rostedtf633cef2008-12-23 23:24:13 -05002634 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002635 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04002636 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002637
2638 if (!trace_seq_printf(s, "%d ?\n", entry->type))
2639 goto partial;
Steven Rostedt7104f302008-10-01 10:52:51 -04002640
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002641 return TRACE_TYPE_HANDLED;
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002642partial:
2643 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002644}
2645
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002646static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002647{
2648 struct trace_seq *s = &iter->seq;
2649 unsigned char newline = '\n';
2650 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002651 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002652
2653 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002654
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002655 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2656 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
2657 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
2658 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
2659 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002660
Steven Rostedtf633cef2008-12-23 23:24:13 -05002661 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002662 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04002663 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002664 if (ret != TRACE_TYPE_HANDLED)
2665 return ret;
2666 }
Steven Rostedt7104f302008-10-01 10:52:51 -04002667
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002668 SEQ_PUT_FIELD_RET(s, newline);
2669
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002670 return TRACE_TYPE_HANDLED;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002671}
2672
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002673static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002674{
2675 struct trace_seq *s = &iter->seq;
2676 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002677 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002678
2679 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002680
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002681 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2682 SEQ_PUT_FIELD_RET(s, entry->pid);
Steven Rostedt1830b52d2009-02-07 19:38:43 -05002683 SEQ_PUT_FIELD_RET(s, iter->cpu);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002684 SEQ_PUT_FIELD_RET(s, iter->ts);
2685 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002686
Steven Rostedtf633cef2008-12-23 23:24:13 -05002687 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04002688 return event ? event->funcs->binary(iter, 0, event) :
2689 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002690}
2691
Jiri Olsa62b915f2010-04-02 19:01:22 +02002692int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002693{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002694 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002695 int cpu;
2696
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002697 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002698 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002699 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002700 buf_iter = trace_buffer_iter(iter, cpu);
2701 if (buf_iter) {
2702 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002703 return 0;
2704 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002705 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002706 return 0;
2707 }
2708 return 1;
2709 }
2710
Steven Rostedtab464282008-05-12 21:21:00 +02002711 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04002712 buf_iter = trace_buffer_iter(iter, cpu);
2713 if (buf_iter) {
2714 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04002715 return 0;
2716 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002717 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04002718 return 0;
2719 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002720 }
Steven Rostedtd7690412008-10-01 00:29:53 -04002721
Frederic Weisbecker797d3712008-09-30 18:13:45 +02002722 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002723}
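/*
 * Editor's note: a per-cpu buf_iter only exists for the static "trace"
 * view set up by __tracing_open() below; streaming readers such as
 * trace_pipe never allocate one, which is why both loops above fall
 * back to querying the live ring buffer when trace_buffer_iter()
 * returns NULL.
 */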
2724
Lai Jiangshan4f535962009-05-18 19:35:34 +08002725/* Called with trace_event_read_lock() held. */
Jason Wessel955b61e2010-08-05 09:22:23 -05002726enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002727{
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002728 enum print_line_t ret;
2729
Jiri Olsaee5e51f2011-03-25 12:05:18 +01002730 if (iter->lost_events &&
2731 !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2732 iter->cpu, iter->lost_events))
2733 return TRACE_TYPE_PARTIAL_LINE;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002734
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002735 if (iter->trace && iter->trace->print_line) {
2736 ret = iter->trace->print_line(iter);
2737 if (ret != TRACE_TYPE_UNHANDLED)
2738 return ret;
2739 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02002740
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05002741 if (iter->ent->type == TRACE_BPUTS &&
2742 trace_flags & TRACE_ITER_PRINTK &&
2743 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2744 return trace_print_bputs_msg_only(iter);
2745
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002746 if (iter->ent->type == TRACE_BPRINT &&
2747 trace_flags & TRACE_ITER_PRINTK &&
2748 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04002749 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002750
Frederic Weisbecker66896a82008-12-13 20:18:13 +01002751 if (iter->ent->type == TRACE_PRINT &&
2752 trace_flags & TRACE_ITER_PRINTK &&
2753 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04002754 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01002755
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002756 if (trace_flags & TRACE_ITER_BIN)
2757 return print_bin_fmt(iter);
2758
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002759 if (trace_flags & TRACE_ITER_HEX)
2760 return print_hex_fmt(iter);
2761
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002762 if (trace_flags & TRACE_ITER_RAW)
2763 return print_raw_fmt(iter);
2764
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002765 return print_trace_fmt(iter);
2766}
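/*
 * Editor's note: the dispatch order above is fixed: the lost-event
 * annotation comes first, then a tracer-specific ->print_line() hook,
 * then the printk-msg-only shortcuts (TRACE_BPUTS, TRACE_BPRINT,
 * TRACE_PRINT), and finally the mutually exclusive output-format flags
 * checked as bin -> hex -> raw before falling back to the default
 * formatter.
 */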
2767
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01002768void trace_latency_header(struct seq_file *m)
2769{
2770 struct trace_iterator *iter = m->private;
2771
2772 /* print nothing if the buffers are empty */
2773 if (trace_empty(iter))
2774 return;
2775
2776 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2777 print_trace_header(m, iter);
2778
2779 if (!(trace_flags & TRACE_ITER_VERBOSE))
2780 print_lat_help_header(m);
2781}
2782
Jiri Olsa62b915f2010-04-02 19:01:22 +02002783void trace_default_header(struct seq_file *m)
2784{
2785 struct trace_iterator *iter = m->private;
2786
Jiri Olsaf56e7f82011-06-03 16:58:49 +02002787 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2788 return;
2789
Jiri Olsa62b915f2010-04-02 19:01:22 +02002790 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2791 /* print nothing if the buffers are empty */
2792 if (trace_empty(iter))
2793 return;
2794 print_trace_header(m, iter);
2795 if (!(trace_flags & TRACE_ITER_VERBOSE))
2796 print_lat_help_header(m);
2797 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05002798 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2799 if (trace_flags & TRACE_ITER_IRQ_INFO)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002800 print_func_help_header_irq(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002801 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002802 print_func_help_header(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002803 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02002804 }
2805}
2806
Steven Rostedte0a413f2011-09-29 21:26:16 -04002807static void test_ftrace_alive(struct seq_file *m)
2808{
2809 if (!ftrace_is_dead())
2810 return;
2811 seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
2812 seq_printf(m, "# MAY BE MISSING FUNCTION EVENTS\n");
2813}
2814
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002815#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002816static void show_snapshot_main_help(struct seq_file *m)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002817{
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002818 seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
2819 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2820 seq_printf(m, "# Takes a snapshot of the main buffer.\n");
Wang YanQingb9be6d02013-09-14 12:59:16 +08002821 seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002822	seq_printf(m, "#                      (Doesn't have to be '2'; works with any number that\n");
2823 seq_printf(m, "# is not a '0' or '1')\n");
2824}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002825
2826static void show_snapshot_percpu_help(struct seq_file *m)
2827{
2828 seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2829#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2830 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2831 seq_printf(m, "# Takes a snapshot of the main buffer for this cpu.\n");
2832#else
2833 seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
2834 seq_printf(m, "# Must use main snapshot file to allocate.\n");
2835#endif
2836 seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
2837	seq_printf(m, "#                      (Doesn't have to be '2'; works with any number that\n");
2838 seq_printf(m, "# is not a '0' or '1')\n");
2839}
2840
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002841static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2842{
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05002843 if (iter->tr->allocated_snapshot)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002844 seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
2845 else
2846 seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
2847
2848 seq_printf(m, "# Snapshot commands:\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002849 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2850 show_snapshot_main_help(m);
2851 else
2852 show_snapshot_percpu_help(m);
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002853}
2854#else
2855/* Should never be called */
2856static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2857#endif
2858
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002859static int s_show(struct seq_file *m, void *v)
2860{
2861 struct trace_iterator *iter = v;
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002862 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002863
2864 if (iter->ent == NULL) {
2865 if (iter->tr) {
2866 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2867 seq_puts(m, "#\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04002868 test_ftrace_alive(m);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002869 }
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002870 if (iter->snapshot && trace_empty(iter))
2871 print_snapshot_help(m, iter);
2872 else if (iter->trace && iter->trace->print_header)
Markus Metzger8bba1bf2008-11-25 09:12:31 +01002873 iter->trace->print_header(m);
Jiri Olsa62b915f2010-04-02 19:01:22 +02002874 else
2875 trace_default_header(m);
2876
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002877 } else if (iter->leftover) {
2878 /*
2879 * If we filled the seq_file buffer earlier, we
2880 * want to just show it now.
2881 */
2882 ret = trace_print_seq(m, &iter->seq);
2883
2884 /* ret should this time be zero, but you never know */
2885 iter->leftover = ret;
2886
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002887 } else {
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002888 print_trace_line(iter);
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002889 ret = trace_print_seq(m, &iter->seq);
2890 /*
2891 * If we overflow the seq_file buffer, then it will
2892 * ask us for this data again at start up.
2893 * Use that instead.
2894 * ret is 0 if seq_file write succeeded.
2895 * -1 otherwise.
2896 */
2897 iter->leftover = ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002898 }
2899
2900 return 0;
2901}
2902
Oleg Nesterov649e9c72013-07-23 17:25:54 +02002903/*
2904 * Should be used after trace_array_get(), trace_types_lock
2905 * ensures that i_cdev was already initialized.
2906 */
2907static inline int tracing_get_cpu(struct inode *inode)
2908{
2909 if (inode->i_cdev) /* See trace_create_cpu_file() */
2910 return (long)inode->i_cdev - 1;
2911 return RING_BUFFER_ALL_CPUS;
2912}
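/*
 * Editor's sketch (hypothetical, not from this file): the decode above
 * implies the per-cpu files stash "cpu + 1" in i_cdev so that a NULL
 * i_cdev can still mean "all CPUs".  Roughly:
 */
#if 0
static void trace_create_cpu_file_sketch(struct inode *inode, long cpu)
{
	/* bias by one: 0 is reserved for RING_BUFFER_ALL_CPUS */
	inode->i_cdev = (void *)(cpu + 1);
}
#endif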
2913
James Morris88e9d342009-09-22 16:43:43 -07002914static const struct seq_operations tracer_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02002915 .start = s_start,
2916 .next = s_next,
2917 .stop = s_stop,
2918 .show = s_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002919};
2920
Ingo Molnare309b412008-05-12 21:20:51 +02002921static struct trace_iterator *
Oleg Nesterov6484c712013-07-23 17:26:10 +02002922__tracing_open(struct inode *inode, struct file *file, bool snapshot)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002923{
Oleg Nesterov6484c712013-07-23 17:26:10 +02002924 struct trace_array *tr = inode->i_private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002925 struct trace_iterator *iter;
Jiri Olsa50e18b92012-04-25 10:23:39 +02002926 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002927
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002928 if (tracing_disabled)
2929 return ERR_PTR(-ENODEV);
Steven Rostedt60a11772008-05-12 21:20:44 +02002930
Jiri Olsa50e18b92012-04-25 10:23:39 +02002931 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002932 if (!iter)
2933 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002934
Steven Rostedt6d158a82012-06-27 20:46:14 -04002935 iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
2936 GFP_KERNEL);
Dan Carpenter93574fc2012-07-11 09:35:08 +03002937 if (!iter->buffer_iter)
2938 goto release;
2939
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002940 /*
2941 * We make a copy of the current tracer to avoid concurrent
2942 * changes on it while we are reading.
2943 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002944 mutex_lock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002945 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002946 if (!iter->trace)
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002947 goto fail;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002948
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002949 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002950
Li Zefan79f55992009-06-15 14:58:26 +08002951 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02002952 goto fail;
2953
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002954 iter->tr = tr;
2955
2956#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002957 /* Currently only the top directory has a snapshot */
2958 if (tr->current_trace->print_max || snapshot)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002959 iter->trace_buffer = &tr->max_buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002960 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002961#endif
2962 iter->trace_buffer = &tr->trace_buffer;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002963 iter->snapshot = snapshot;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002964 iter->pos = -1;
Oleg Nesterov6484c712013-07-23 17:26:10 +02002965 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002966 mutex_init(&iter->mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002967
Markus Metzger8bba1bf2008-11-25 09:12:31 +01002968 /* Notify the tracer early; before we stop tracing. */
2969 if (iter->trace && iter->trace->open)
Markus Metzgera93751c2008-12-11 13:53:26 +01002970 iter->trace->open(iter);
Markus Metzger8bba1bf2008-11-25 09:12:31 +01002971
Steven Rostedt12ef7d42008-11-12 17:52:38 -05002972 /* Annotate start of buffers if we had overruns */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002973 if (ring_buffer_overruns(iter->trace_buffer->buffer))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05002974 iter->iter_flags |= TRACE_FILE_ANNOTATE;
2975
David Sharp8be07092012-11-13 12:18:22 -08002976 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09002977 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08002978 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
2979
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002980 /* stop the trace while dumping if we are not opening "snapshot" */
2981 if (!iter->snapshot)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002982 tracing_stop_tr(tr);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002983
Steven Rostedtae3b5092013-01-23 15:22:59 -05002984 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002985 for_each_tracing_cpu(cpu) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002986 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002987 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07002988 }
2989 ring_buffer_read_prepare_sync();
2990 for_each_tracing_cpu(cpu) {
2991 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002992 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002993 }
2994 } else {
2995 cpu = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002996 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002997 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07002998 ring_buffer_read_prepare_sync();
2999 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003000 tracing_iter_reset(iter, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003001 }
3002
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003003 mutex_unlock(&trace_types_lock);
3004
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003005 return iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003006
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003007 fail:
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003008 mutex_unlock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003009 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003010 kfree(iter->buffer_iter);
Dan Carpenter93574fc2012-07-11 09:35:08 +03003011release:
Jiri Olsa50e18b92012-04-25 10:23:39 +02003012 seq_release_private(inode, file);
3013 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003014}
3015
3016int tracing_open_generic(struct inode *inode, struct file *filp)
3017{
Steven Rostedt60a11772008-05-12 21:20:44 +02003018 if (tracing_disabled)
3019 return -ENODEV;
3020
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003021 filp->private_data = inode->i_private;
3022 return 0;
3023}
3024
Geyslan G. Bem2e864212013-10-18 21:15:54 -03003025bool tracing_is_disabled(void)
3026{
3027	return (tracing_disabled) ? true : false;
3028}
3029
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003030/*
3031 * Open and update trace_array ref count.
3032 * Must have the current trace_array passed to it.
3033 */
Steven Rostedt (Red Hat)dcc30222013-07-02 20:30:52 -04003034static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003035{
3036 struct trace_array *tr = inode->i_private;
3037
3038 if (tracing_disabled)
3039 return -ENODEV;
3040
3041 if (trace_array_get(tr) < 0)
3042 return -ENODEV;
3043
3044 filp->private_data = inode->i_private;
3045
3046 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003047}
3048
Hannes Eder4fd27352009-02-10 19:44:12 +01003049static int tracing_release(struct inode *inode, struct file *file)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003050{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003051 struct trace_array *tr = inode->i_private;
matt mooney907f2782010-09-27 19:04:53 -07003052 struct seq_file *m = file->private_data;
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003053 struct trace_iterator *iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003054 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003055
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003056 if (!(file->f_mode & FMODE_READ)) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003057 trace_array_put(tr);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003058 return 0;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003059 }
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003060
Oleg Nesterov6484c712013-07-23 17:26:10 +02003061 /* Writes do not use seq_file */
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003062 iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003063 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05003064
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003065 for_each_tracing_cpu(cpu) {
3066 if (iter->buffer_iter[cpu])
3067 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3068 }
3069
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003070 if (iter->trace && iter->trace->close)
3071 iter->trace->close(iter);
3072
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003073 if (!iter->snapshot)
3074 /* reenable tracing if it was previously enabled */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003075 tracing_start_tr(tr);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003076
3077 __trace_array_put(tr);
3078
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003079 mutex_unlock(&trace_types_lock);
3080
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003081 mutex_destroy(&iter->mutex);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003082 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003083 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003084 kfree(iter->buffer_iter);
Jiri Olsa50e18b92012-04-25 10:23:39 +02003085 seq_release_private(inode, file);
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003086
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003087 return 0;
3088}
3089
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003090static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3091{
3092 struct trace_array *tr = inode->i_private;
3093
3094 trace_array_put(tr);
3095 return 0;
3096}
3097
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003098static int tracing_single_release_tr(struct inode *inode, struct file *file)
3099{
3100 struct trace_array *tr = inode->i_private;
3101
3102 trace_array_put(tr);
3103
3104 return single_release(inode, file);
3105}
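/*
 * Editor's note: every open that took a reference through
 * trace_array_get() (e.g. tracing_open_generic_tr() above) must be
 * paired with one of these release helpers so that the trace_array
 * refcount balances when the file is closed.
 */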
3106
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003107static int tracing_open(struct inode *inode, struct file *file)
3108{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003109 struct trace_array *tr = inode->i_private;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003110 struct trace_iterator *iter;
3111 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003112
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003113 if (trace_array_get(tr) < 0)
3114 return -ENODEV;
3115
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003116 /* If this file was open for write, then erase contents */
Oleg Nesterov6484c712013-07-23 17:26:10 +02003117 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3118 int cpu = tracing_get_cpu(inode);
3119
3120 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003121 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003122 else
Oleg Nesterov6484c712013-07-23 17:26:10 +02003123 tracing_reset(&tr->trace_buffer, cpu);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003124 }
3125
3126 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003127 iter = __tracing_open(inode, file, false);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003128 if (IS_ERR(iter))
3129 ret = PTR_ERR(iter);
3130 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
3131 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3132 }
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003133
3134 if (ret < 0)
3135 trace_array_put(tr);
3136
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003137 return ret;
3138}
3139
Ingo Molnare309b412008-05-12 21:20:51 +02003140static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003141t_next(struct seq_file *m, void *v, loff_t *pos)
3142{
Li Zefanf129e962009-06-24 09:53:44 +08003143 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003144
3145 (*pos)++;
3146
3147 if (t)
3148 t = t->next;
3149
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003150 return t;
3151}
3152
3153static void *t_start(struct seq_file *m, loff_t *pos)
3154{
Li Zefanf129e962009-06-24 09:53:44 +08003155 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003156 loff_t l = 0;
3157
3158 mutex_lock(&trace_types_lock);
Li Zefanf129e962009-06-24 09:53:44 +08003159 for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003160 ;
3161
3162 return t;
3163}
3164
3165static void t_stop(struct seq_file *m, void *p)
3166{
3167 mutex_unlock(&trace_types_lock);
3168}
3169
3170static int t_show(struct seq_file *m, void *v)
3171{
3172 struct tracer *t = v;
3173
3174 if (!t)
3175 return 0;
3176
3177 seq_printf(m, "%s", t->name);
3178 if (t->next)
3179 seq_putc(m, ' ');
3180 else
3181 seq_putc(m, '\n');
3182
3183 return 0;
3184}
3185
James Morris88e9d342009-09-22 16:43:43 -07003186static const struct seq_operations show_traces_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003187 .start = t_start,
3188 .next = t_next,
3189 .stop = t_stop,
3190 .show = t_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003191};
3192
3193static int show_traces_open(struct inode *inode, struct file *file)
3194{
Steven Rostedt60a11772008-05-12 21:20:44 +02003195 if (tracing_disabled)
3196 return -ENODEV;
3197
Li Zefanf129e962009-06-24 09:53:44 +08003198 return seq_open(file, &show_traces_seq_ops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003199}
3200
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003201static ssize_t
3202tracing_write_stub(struct file *filp, const char __user *ubuf,
3203 size_t count, loff_t *ppos)
3204{
3205 return count;
3206}
3207
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003208loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08003209{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003210 int ret;
3211
Slava Pestov364829b2010-11-24 15:13:16 -08003212 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003213 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08003214 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003215 file->f_pos = ret = 0;
3216
3217 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08003218}
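/*
 * Editor's note: seeking is only meaningful for readers, which go
 * through seq_file; a write-only handle (used just to truncate the
 * buffer) is pinned at offset 0.
 */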
3219
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003220static const struct file_operations tracing_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003221 .open = tracing_open,
3222 .read = seq_read,
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003223 .write = tracing_write_stub,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003224 .llseek = tracing_lseek,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003225 .release = tracing_release,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003226};
3227
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003228static const struct file_operations show_traces_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003229 .open = show_traces_open,
3230 .read = seq_read,
3231 .release = seq_release,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003232 .llseek = seq_lseek,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003233};
3234
Ingo Molnar36dfe922008-05-12 21:20:52 +02003235/*
Ingo Molnar36dfe922008-05-12 21:20:52 +02003236 * The tracer itself will not take this lock, but still we want
3237 * to provide a consistent cpumask to user-space:
3238 */
3239static DEFINE_MUTEX(tracing_cpumask_update_lock);
3240
3241/*
3242 * Temporary storage for the character representation of the
3243 * CPU bitmask (and one more byte for the newline):
3244 */
3245static char mask_str[NR_CPUS + 1];
3246
Ingo Molnarc7078de2008-05-12 21:20:52 +02003247static ssize_t
3248tracing_cpumask_read(struct file *filp, char __user *ubuf,
3249 size_t count, loff_t *ppos)
3250{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003251 struct trace_array *tr = file_inode(filp)->i_private;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003252 int len;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003253
3254 mutex_lock(&tracing_cpumask_update_lock);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003255
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003256 len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003257 if (count - len < 2) {
3258 count = -EINVAL;
3259 goto out_err;
3260 }
3261 len += sprintf(mask_str + len, "\n");
3262	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
3263
3264out_err:
Ingo Molnarc7078de2008-05-12 21:20:52 +02003265 mutex_unlock(&tracing_cpumask_update_lock);
3266
3267 return count;
3268}
3269
3270static ssize_t
3271tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3272 size_t count, loff_t *ppos)
3273{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003274 struct trace_array *tr = file_inode(filp)->i_private;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303275 cpumask_var_t tracing_cpumask_new;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003276 int err, cpu;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303277
3278 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3279 return -ENOMEM;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003280
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303281 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003282 if (err)
3283 goto err_unlock;
3284
Li Zefan215368e2009-06-15 10:56:42 +08003285 mutex_lock(&tracing_cpumask_update_lock);
3286
Steven Rostedta5e25882008-12-02 15:34:05 -05003287 local_irq_disable();
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01003288 arch_spin_lock(&ftrace_max_lock);
Steven Rostedtab464282008-05-12 21:21:00 +02003289 for_each_tracing_cpu(cpu) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02003290 /*
3291 * Increase/decrease the disabled counter if we are
3292 * about to flip a bit in the cpumask:
3293 */
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003294 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303295 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003296 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3297 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003298 }
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003299 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303300 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003301 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3302 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003303 }
3304 }
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01003305 arch_spin_unlock(&ftrace_max_lock);
Steven Rostedta5e25882008-12-02 15:34:05 -05003306 local_irq_enable();
Ingo Molnar36dfe922008-05-12 21:20:52 +02003307
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003308 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003309
Ingo Molnarc7078de2008-05-12 21:20:52 +02003310 mutex_unlock(&tracing_cpumask_update_lock);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303311 free_cpumask_var(tracing_cpumask_new);
Ingo Molnarc7078de2008-05-12 21:20:52 +02003312
Ingo Molnarc7078de2008-05-12 21:20:52 +02003313 return count;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003314
3315err_unlock:
Li Zefan215368e2009-06-15 10:56:42 +08003316 free_cpumask_var(tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003317
3318 return err;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003319}
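/*
 * Editor's sketch: user space drives this with a hex CPU mask, e.g.
 * (illustrative shell session, paths assume debugfs is mounted):
 *
 *	# echo 3 > /sys/kernel/debug/tracing/tracing_cpumask	(CPUs 0-1)
 *	# cat /sys/kernel/debug/tracing/tracing_cpumask
 *	3
 *
 * cpumask_parse_user() does the hex parsing; the loop above then pairs
 * every flipped bit with a disabled-counter update and a matching
 * ring_buffer_record_{disable,enable}_cpu() call under ftrace_max_lock,
 * so no CPU is ever observed half-switched.
 */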
3320
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003321static const struct file_operations tracing_cpumask_fops = {
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003322 .open = tracing_open_generic_tr,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003323 .read = tracing_cpumask_read,
3324 .write = tracing_cpumask_write,
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003325 .release = tracing_release_generic_tr,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003326 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003327};
3328
Li Zefanfdb372e2009-12-08 11:15:59 +08003329static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003330{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003331 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003332 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003333 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003334 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003335
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003336 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003337 tracer_flags = tr->current_trace->flags->val;
3338 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003339
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003340 for (i = 0; trace_options[i]; i++) {
3341 if (trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08003342 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003343 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003344 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003345 }
3346
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003347 for (i = 0; trace_opts[i].name; i++) {
3348 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08003349 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003350 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003351 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003352 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003353 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003354
Li Zefanfdb372e2009-12-08 11:15:59 +08003355 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003356}
3357
Li Zefan8d18eaa2009-12-08 11:17:06 +08003358static int __set_tracer_option(struct tracer *trace,
3359 struct tracer_flags *tracer_flags,
3360 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003361{
Li Zefan8d18eaa2009-12-08 11:17:06 +08003362 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003363
Li Zefan8d18eaa2009-12-08 11:17:06 +08003364 ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003365 if (ret)
3366 return ret;
3367
3368 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08003369 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003370 else
Zhaolei77708412009-08-07 18:53:21 +08003371 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003372 return 0;
3373}
3374
Li Zefan8d18eaa2009-12-08 11:17:06 +08003375/* Try to assign a tracer specific option */
3376static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
3377{
3378 struct tracer_flags *tracer_flags = trace->flags;
3379 struct tracer_opt *opts = NULL;
3380 int i;
3381
3382 for (i = 0; tracer_flags->opts[i].name; i++) {
3383 opts = &tracer_flags->opts[i];
3384
3385 if (strcmp(cmp, opts->name) == 0)
3386 return __set_tracer_option(trace, trace->flags,
3387 opts, neg);
3388 }
3389
3390 return -EINVAL;
3391}
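/*
 * Editor's sketch (hypothetical names): a tracer exposes private
 * options through a NULL-terminated tracer_opt table that the loop
 * above walks, e.g.:
 */
#if 0
#define MY_TRACER_OPT_VERBOSE	0x1

static struct tracer_opt my_tracer_opts[] = {
	{ .name = "verbose", .bit = MY_TRACER_OPT_VERBOSE },
	{ }	/* .name == NULL terminates the walk */
};

static struct tracer_flags my_tracer_flags = {
	.val	= 0,
	.opts	= my_tracer_opts,
};
#endif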
3392
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003393/* Some tracers require overwrite to stay enabled */
3394int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3395{
3396 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3397 return -1;
3398
3399 return 0;
3400}
3401
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003402int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003403{
3404 /* do nothing if flag is already set */
3405 if (!!(trace_flags & mask) == !!enabled)
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003406 return 0;
3407
3408 /* Give the tracer a chance to approve the change */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003409 if (tr->current_trace->flag_changed)
3410 if (tr->current_trace->flag_changed(tr->current_trace, mask, !!enabled))
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003411 return -EINVAL;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003412
3413 if (enabled)
3414 trace_flags |= mask;
3415 else
3416 trace_flags &= ~mask;
Li Zefane870e9a2010-07-02 11:07:32 +08003417
3418 if (mask == TRACE_ITER_RECORD_CMD)
3419 trace_event_enable_cmd_record(enabled);
David Sharp750912f2010-12-08 13:46:47 -08003420
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003421 if (mask == TRACE_ITER_OVERWRITE) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003422 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003423#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003424 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003425#endif
3426 }
Steven Rostedt81698832012-10-11 10:15:05 -04003427
3428 if (mask == TRACE_ITER_PRINTK)
3429 trace_printk_start_stop_comm(enabled);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003430
3431 return 0;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003432}
3433
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003434static int trace_set_options(struct trace_array *tr, char *option)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003435{
Li Zefan8d18eaa2009-12-08 11:17:06 +08003436 char *cmp;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003437 int neg = 0;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003438 int ret = -ENODEV;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003439 int i;
3440
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003441 cmp = strstrip(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003442
Li Zefan8d18eaa2009-12-08 11:17:06 +08003443 if (strncmp(cmp, "no", 2) == 0) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003444 neg = 1;
3445 cmp += 2;
3446 }
3447
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003448 mutex_lock(&trace_types_lock);
3449
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003450 for (i = 0; trace_options[i]; i++) {
Li Zefan8d18eaa2009-12-08 11:17:06 +08003451 if (strcmp(cmp, trace_options[i]) == 0) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003452 ret = set_tracer_flag(tr, 1 << i, !neg);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003453 break;
3454 }
3455 }
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003456
3457 /* If no option could be set, test the specific tracer options */
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003458 if (!trace_options[i])
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003459 ret = set_tracer_option(tr->current_trace, cmp, neg);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003460
3461 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003462
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003463 return ret;
3464}
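/*
 * Editor's note: e.g. writing "nooverwrite" is stripped to neg = 1,
 * cmp = "overwrite"; that matches TRACE_ITER_OVERWRITE in
 * trace_options[] and lands in set_tracer_flag(tr, mask, 0).  A string
 * matching no global option falls through to the current tracer's
 * private flags via set_tracer_option().
 */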
3465
3466static ssize_t
3467tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3468 size_t cnt, loff_t *ppos)
3469{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003470 struct seq_file *m = filp->private_data;
3471 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003472 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003473 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003474
3475 if (cnt >= sizeof(buf))
3476 return -EINVAL;
3477
3478 if (copy_from_user(&buf, ubuf, cnt))
3479 return -EFAULT;
3480
Steven Rostedta8dd2172013-01-09 20:54:17 -05003481 buf[cnt] = 0;
3482
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003483 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003484 if (ret < 0)
3485 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003486
Jiri Olsacf8517c2009-10-23 19:36:16 -04003487 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003488
3489 return cnt;
3490}
3491
Li Zefanfdb372e2009-12-08 11:15:59 +08003492static int tracing_trace_options_open(struct inode *inode, struct file *file)
3493{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003494 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003495 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003496
Li Zefanfdb372e2009-12-08 11:15:59 +08003497 if (tracing_disabled)
3498 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003499
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003500 if (trace_array_get(tr) < 0)
3501 return -ENODEV;
3502
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003503 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3504 if (ret < 0)
3505 trace_array_put(tr);
3506
3507 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08003508}
3509
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003510static const struct file_operations tracing_iter_fops = {
Li Zefanfdb372e2009-12-08 11:15:59 +08003511 .open = tracing_trace_options_open,
3512 .read = seq_read,
3513 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003514 .release = tracing_single_release_tr,
Steven Rostedtee6bce52008-11-12 17:52:37 -05003515 .write = tracing_trace_options_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003516};
3517
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003518static const char readme_msg[] =
3519 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003520 "# echo 0 > tracing_on : quick way to disable tracing\n"
3521 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3522 " Important files:\n"
3523 " trace\t\t\t- The static contents of the buffer\n"
3524 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3525 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3526 " current_tracer\t- function and latency tracers\n"
3527 " available_tracers\t- list of configured tracers for current_tracer\n"
3528 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3529 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3530 " trace_clock\t\t-change the clock used to order events\n"
3531 " local: Per cpu clock but may not be synced across CPUs\n"
3532 " global: Synced across CPUs but slows tracing down.\n"
3533 " counter: Not a clock, but just an increment\n"
3534 " uptime: Jiffy counter from time of boot\n"
3535 " perf: Same clock that perf events use\n"
3536#ifdef CONFIG_X86_64
3537 " x86-tsc: TSC cycle counter\n"
3538#endif
3539 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3540 " tracing_cpumask\t- Limit which CPUs to trace\n"
3541 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3542 "\t\t\t Remove sub-buffer with rmdir\n"
3543 " trace_options\t\t- Set format or modify how tracing happens\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003544	"\t\t\t  Disable an option by prefixing 'no' to the\n"
3545 "\t\t\t option name\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003546#ifdef CONFIG_DYNAMIC_FTRACE
3547 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003548 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3549 "\t\t\t functions\n"
3550 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3551 "\t modules: Can select a group via module\n"
3552 "\t Format: :mod:<module-name>\n"
3553 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3554 "\t triggers: a command to perform when function is hit\n"
3555 "\t Format: <function>:<trigger>[:count]\n"
3556 "\t trigger: traceon, traceoff\n"
3557 "\t\t enable_event:<system>:<event>\n"
3558 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003559#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003560 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003561#endif
3562#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003563 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003564#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003565 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3566 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3567 "\t The first one will disable tracing every time do_fault is hit\n"
3568 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3569 "\t The first time do trap is hit and it disables tracing, the\n"
3570 "\t counter will decrement to 2. If tracing is already disabled,\n"
3571 "\t the counter will not decrement. It only decrements when the\n"
3572 "\t trigger did work\n"
3573 "\t To remove trigger without count:\n"
3574 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3575 "\t To remove trigger with a count:\n"
3576 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003577 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003578 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3579 "\t modules: Can select a group via module command :mod:\n"
3580 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003581#endif /* CONFIG_DYNAMIC_FTRACE */
3582#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003583 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3584 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003585#endif
3586#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3587 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3588 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3589#endif
3590#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003591 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3592 "\t\t\t snapshot buffer. Read the contents for more\n"
3593 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003594#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003595#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003596 " stack_trace\t\t- Shows the max stack trace when active\n"
3597 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003598 "\t\t\t Write into this file to reset the max size (trigger a\n"
3599 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003600#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003601 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3602 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003603#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003604#endif /* CONFIG_STACK_TRACER */
Tom Zanussi26f25562014-01-17 15:11:44 -06003605 " events/\t\t- Directory containing all trace event subsystems:\n"
3606 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3607 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003608 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3609 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003610 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003611 " events/<system>/<event>/\t- Directory containing control files for\n"
3612 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003613 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3614 " filter\t\t- If set, only events passing filter are traced\n"
3615 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003616 "\t Format: <trigger>[:count][if <filter>]\n"
3617 "\t trigger: traceon, traceoff\n"
3618 "\t enable_event:<system>:<event>\n"
3619 "\t disable_event:<system>:<event>\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003620#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003621 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003622#endif
3623#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003624 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003625#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003626 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3627 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3628 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3629 "\t events/block/block_unplug/trigger\n"
3630 "\t The first disables tracing every time block_unplug is hit.\n"
3631 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3632 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3633 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3634 "\t Like function triggers, the counter is only decremented if it\n"
3635 "\t enabled or disabled tracing.\n"
3636 "\t To remove a trigger without a count:\n"
3637 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3638 "\t To remove a trigger with a count:\n"
3639 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3640 "\t Filters can be ignored when removing a trigger.\n"
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003641;
3642
3643static ssize_t
3644tracing_readme_read(struct file *filp, char __user *ubuf,
3645 size_t cnt, loff_t *ppos)
3646{
3647 return simple_read_from_buffer(ubuf, cnt, ppos,
3648 readme_msg, strlen(readme_msg));
3649}
3650
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003651static const struct file_operations tracing_readme_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003652 .open = tracing_open_generic,
3653 .read = tracing_readme_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003654 .llseek = generic_file_llseek,
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003655};
3656
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003657static ssize_t
Avadh Patel69abe6a2009-04-10 16:04:48 -04003658tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
3659 size_t cnt, loff_t *ppos)
3660{
3661 char *buf_comm;
3662 char *file_buf;
3663 char *buf;
3664 int len = 0;
3665 int pid;
3666 int i;
3667
3668 file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
3669 if (!file_buf)
3670 return -ENOMEM;
3671
3672 buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
3673 if (!buf_comm) {
3674 kfree(file_buf);
3675 return -ENOMEM;
3676 }
3677
3678 buf = file_buf;
3679
3680 for (i = 0; i < SAVED_CMDLINES; i++) {
3681 int r;
3682
3683 pid = map_cmdline_to_pid[i];
3684 if (pid == -1 || pid == NO_CMDLINE_MAP)
3685 continue;
3686
3687 trace_find_cmdline(pid, buf_comm);
3688 r = sprintf(buf, "%d %s\n", pid, buf_comm);
3689 buf += r;
3690 len += r;
3691 }
3692
3693 len = simple_read_from_buffer(ubuf, cnt, ppos,
3694 file_buf, len);
3695
3696 kfree(file_buf);
3697 kfree(buf_comm);
3698
3699 return len;
3700}
3701
3702static const struct file_operations tracing_saved_cmdlines_fops = {
3703 .open = tracing_open_generic,
3704 .read = tracing_saved_cmdlines_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003705 .llseek = generic_file_llseek,
Avadh Patel69abe6a2009-04-10 16:04:48 -04003706};
3707
3708static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003709tracing_set_trace_read(struct file *filp, char __user *ubuf,
3710 size_t cnt, loff_t *ppos)
3711{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003712 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08003713 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003714 int r;
3715
3716 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003717 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003718 mutex_unlock(&trace_types_lock);
3719
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003720 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003721}
3722
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02003723int tracer_init(struct tracer *t, struct trace_array *tr)
3724{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003725 tracing_reset_online_cpus(&tr->trace_buffer);
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02003726 return t->init(tr);
3727}
3728
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003729static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003730{
3731 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05003732
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003733 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003734 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003735}
3736
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003737#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookad60da502012-10-17 11:56:16 +09003738/* resize @trace_buf's buffer to the size of @size_buf's entries */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003739static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
3740 struct trace_buffer *size_buf, int cpu_id)
Hiraku Toyookad60da502012-10-17 11:56:16 +09003741{
3742 int cpu, ret = 0;
3743
3744 if (cpu_id == RING_BUFFER_ALL_CPUS) {
3745 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003746 ret = ring_buffer_resize(trace_buf->buffer,
3747 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
Hiraku Toyookad60da502012-10-17 11:56:16 +09003748 if (ret < 0)
3749 break;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003750 per_cpu_ptr(trace_buf->data, cpu)->entries =
3751 per_cpu_ptr(size_buf->data, cpu)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09003752 }
3753 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003754 ret = ring_buffer_resize(trace_buf->buffer,
3755 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
Hiraku Toyookad60da502012-10-17 11:56:16 +09003756 if (ret == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003757 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
3758 per_cpu_ptr(size_buf->data, cpu_id)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09003759 }
3760
3761 return ret;
3762}
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003763#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09003764
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003765static int __tracing_resize_ring_buffer(struct trace_array *tr,
3766 unsigned long size, int cpu)
Steven Rostedt73c51622009-03-11 13:42:01 -04003767{
3768 int ret;
3769
3770 /*
3771	 * If the kernel or the user changes the size of the ring buffer,
Steven Rostedta123c522009-03-12 11:21:08 -04003772	 * we use the size that was given and can forget about
3773	 * expanding it later.
Steven Rostedt73c51622009-03-11 13:42:01 -04003774 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05003775 ring_buffer_expanded = true;
Steven Rostedt73c51622009-03-11 13:42:01 -04003776
Steven Rostedtb382ede62012-10-10 21:44:34 -04003777 /* May be called before buffers are initialized */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003778 if (!tr->trace_buffer.buffer)
Steven Rostedtb382ede62012-10-10 21:44:34 -04003779 return 0;
3780
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003781 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003782 if (ret < 0)
3783 return ret;
3784
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003785#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003786 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
3787 !tr->current_trace->use_max_tr)
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003788 goto out;
3789
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003790 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003791 if (ret < 0) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003792 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
3793 &tr->trace_buffer, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003794 if (r < 0) {
Steven Rostedta123c522009-03-12 11:21:08 -04003795 /*
3796 * AARGH! We are left with different
3797 * size max buffer!!!!
3798 * The max buffer is our "snapshot" buffer.
3799 * When a tracer needs a snapshot (one of the
3800 * latency tracers), it swaps the max buffer
3801	 * with the saved snapshot. We succeeded in updating
3802	 * the size of the main buffer, but failed to
3803 * update the size of the max buffer. But when we tried
3804 * to reset the main buffer to the original size, we
3805 * failed there too. This is very unlikely to
3806 * happen, but if it does, warn and kill all
3807 * tracing.
3808 */
Steven Rostedt73c51622009-03-11 13:42:01 -04003809 WARN_ON(1);
3810 tracing_disabled = 1;
3811 }
3812 return ret;
3813 }
3814
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003815 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003816 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003817 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003818 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003819
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003820 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003821#endif /* CONFIG_TRACER_MAX_TRACE */
3822
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003823 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003824 set_buffer_entries(&tr->trace_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003825 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003826 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04003827
3828 return ret;
3829}
3830
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003831static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
3832 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003833{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07003834 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003835
3836 mutex_lock(&trace_types_lock);
3837
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003838 if (cpu_id != RING_BUFFER_ALL_CPUS) {
3839	/* make sure this cpu is enabled in the mask */
3840 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
3841 ret = -EINVAL;
3842 goto out;
3843 }
3844 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003845
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003846 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003847 if (ret < 0)
3848 ret = -ENOMEM;
3849
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003850out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003851 mutex_unlock(&trace_types_lock);
3852
3853 return ret;
3854}
3855
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003856
Steven Rostedt1852fcc2009-03-11 14:33:00 -04003857/**
3858 * tracing_update_buffers - used by tracing facility to expand ring buffers
3859 *
3860 * To save memory when tracing is never used on a system that has it
3861 * configured in, the ring buffers are set to a minimum size. But once
3862 * a user starts to use the tracing facility, they need to grow
3863 * to their default size.
3864 *
3865 * This function is to be called when a tracer is about to be used.
3866 */
3867int tracing_update_buffers(void)
3868{
3869 int ret = 0;
3870
Steven Rostedt1027fcb2009-03-12 11:33:20 -04003871 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04003872 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003873 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003874 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04003875 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04003876
3877 return ret;
3878}
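
/*
 * Usage sketch (illustration only, not part of this file): any path that
 * is about to enable tracing is expected to expand the buffers first.
 * The helper name below is hypothetical.
 */
static inline int example_prepare_tracing(void)
{
	int ret;

	/* Grow the ring buffers from their boot-time minimum, if needed. */
	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	/* The buffers now have their default (or user-requested) size. */
	return 0;
}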
3879
Steven Rostedt577b7852009-02-26 23:43:05 -05003880struct trace_option_dentry;
3881
3882static struct trace_option_dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003883create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05003884
3885static void
3886destroy_trace_option_files(struct trace_option_dentry *topts);
3887
Steven Rostedtb2821ae2009-02-02 21:38:32 -05003888static int tracing_set_tracer(const char *buf)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003889{
Steven Rostedt577b7852009-02-26 23:43:05 -05003890 static struct trace_option_dentry *topts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003891 struct trace_array *tr = &global_trace;
3892 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003893#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05003894 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003895#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01003896 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003897
Steven Rostedt1027fcb2009-03-12 11:33:20 -04003898 mutex_lock(&trace_types_lock);
3899
Steven Rostedt73c51622009-03-11 13:42:01 -04003900 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003901 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003902 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04003903 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01003904 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04003905 ret = 0;
3906 }
3907
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003908 for (t = trace_types; t; t = t->next) {
3909 if (strcmp(t->name, buf) == 0)
3910 break;
3911 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02003912 if (!t) {
3913 ret = -EINVAL;
3914 goto out;
3915 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003916 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003917 goto out;
3918
Steven Rostedt9f029e82008-11-12 15:24:24 -05003919 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003920
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003921 tr->current_trace->enabled = false;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003922
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003923 if (tr->current_trace->reset)
3924 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05003925
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003926 /* Current trace needs to be nop_trace before synchronize_sched */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003927 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05003928
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05003929#ifdef CONFIG_TRACER_MAX_TRACE
3930 had_max_tr = tr->allocated_snapshot;
Steven Rostedt34600f02013-01-22 13:35:11 -05003931
3932 if (had_max_tr && !t->use_max_tr) {
3933 /*
3934 * We need to make sure that the update_max_tr sees that
3935 * current_trace changed to nop_trace to keep it from
3936 * swapping the buffers after we resize it.
3937	 * The update_max_tr is called with interrupts disabled,
3938	 * so a synchronize_sched() is sufficient.
3939 */
3940 synchronize_sched();
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04003941 free_snapshot(tr);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003942 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003943#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05003944 destroy_trace_option_files(topts);
3945
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003946 topts = create_trace_option_files(tr, t);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003947
3948#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05003949 if (t->use_max_tr && !had_max_tr) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04003950 ret = alloc_snapshot(tr);
Hiraku Toyookad60da502012-10-17 11:56:16 +09003951 if (ret < 0)
3952 goto out;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003953 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003954#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05003955
Frederic Weisbecker1c800252008-11-16 05:57:26 +01003956 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02003957 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01003958 if (ret)
3959 goto out;
3960 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003961
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003962 tr->current_trace = t;
3963 tr->current_trace->enabled = true;
Steven Rostedt9f029e82008-11-12 15:24:24 -05003964 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003965 out:
3966 mutex_unlock(&trace_types_lock);
3967
Peter Zijlstrad9e54072008-11-01 19:57:37 +01003968 return ret;
3969}
3970
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003971static ssize_t
3972tracing_set_trace_write(struct file *filp, const char __user *ubuf,
3973 size_t cnt, loff_t *ppos)
3974{
Li Zefanee6c2c12009-09-18 14:06:47 +08003975 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003976 int i;
3977 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01003978 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003979
Steven Rostedt60063a62008-10-28 10:44:24 -04003980 ret = cnt;
3981
Li Zefanee6c2c12009-09-18 14:06:47 +08003982 if (cnt > MAX_TRACER_SIZE)
3983 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003984
3985 if (copy_from_user(&buf, ubuf, cnt))
3986 return -EFAULT;
3987
3988 buf[cnt] = 0;
3989
3990 /* strip ending whitespace. */
3991 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
3992 buf[i] = 0;
3993
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01003994 err = tracing_set_tracer(buf);
3995 if (err)
3996 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003997
Jiri Olsacf8517c2009-10-23 19:36:16 -04003998 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003999
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004000 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004001}
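
/*
 * Userspace sketch (illustration only, not part of this kernel file):
 * a tracer is selected by writing its name to the 'current_tracer'
 * file, which is what tracing_set_trace_write() above implements.
 * The tracefs mount point below is an assumption; it may also be
 * /sys/kernel/tracing.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int set_tracer(const char *name)
{
	int fd = open("/sys/kernel/debug/tracing/current_tracer", O_WRONLY);

	if (fd < 0)
		return -1;
	/* A trailing newline is fine; the write handler strips whitespace. */
	if (write(fd, name, strlen(name)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}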
4002
4003static ssize_t
4004tracing_max_lat_read(struct file *filp, char __user *ubuf,
4005 size_t cnt, loff_t *ppos)
4006{
4007 unsigned long *ptr = filp->private_data;
4008 char buf[64];
4009 int r;
4010
Steven Rostedtcffae432008-05-12 21:21:00 +02004011 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004012 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02004013 if (r > sizeof(buf))
4014 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004015 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004016}
4017
4018static ssize_t
4019tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4020 size_t cnt, loff_t *ppos)
4021{
Hannes Eder5e398412009-02-10 19:44:34 +01004022 unsigned long *ptr = filp->private_data;
Hannes Eder5e398412009-02-10 19:44:34 +01004023 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004024 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004025
Peter Huewe22fe9b52011-06-07 21:58:27 +02004026 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4027 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004028 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004029
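	/* The value is written in usecs but stored in nsecs (see the read side). */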
4030 *ptr = val * 1000;
4031
4032 return cnt;
4033}
4034
Steven Rostedtb3806b42008-05-12 21:20:46 +02004035static int tracing_open_pipe(struct inode *inode, struct file *filp)
4036{
Oleg Nesterov15544202013-07-23 17:25:57 +02004037 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004038 struct trace_iterator *iter;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004039 int ret = 0;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004040
4041 if (tracing_disabled)
4042 return -ENODEV;
4043
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004044 if (trace_array_get(tr) < 0)
4045 return -ENODEV;
4046
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004047 mutex_lock(&trace_types_lock);
4048
Steven Rostedtb3806b42008-05-12 21:20:46 +02004049 /* create a buffer to store the information to pass to userspace */
4050 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004051 if (!iter) {
4052 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004053 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004054 goto out;
4055 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004056
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004057 /*
4058 * We make a copy of the current tracer to avoid concurrent
4059	 * changes to it while we are reading.
4060 */
4061 iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
4062 if (!iter->trace) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004063 ret = -ENOMEM;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004064 goto fail;
4065 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004066 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004067
4068 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4069 ret = -ENOMEM;
4070 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10304071 }
4072
Steven Rostedta3097202008-11-07 22:36:02 -05004073 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10304074 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05004075
Steven Rostedt112f38a72009-06-01 15:16:05 -04004076 if (trace_flags & TRACE_ITER_LATENCY_FMT)
4077 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4078
David Sharp8be07092012-11-13 12:18:22 -08004079 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09004080 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08004081 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4082
Oleg Nesterov15544202013-07-23 17:25:57 +02004083 iter->tr = tr;
4084 iter->trace_buffer = &tr->trace_buffer;
4085 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004086 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004087 filp->private_data = iter;
4088
Steven Rostedt107bad82008-05-12 21:21:01 +02004089 if (iter->trace->pipe_open)
4090 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02004091
Arnd Bergmannb4447862010-07-07 23:40:11 +02004092 nonseekable_open(inode, filp);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004093out:
4094 mutex_unlock(&trace_types_lock);
4095 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004096
4097fail:
4098 kfree(iter->trace);
4099 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004100 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004101 mutex_unlock(&trace_types_lock);
4102 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004103}
4104
4105static int tracing_release_pipe(struct inode *inode, struct file *file)
4106{
4107 struct trace_iterator *iter = file->private_data;
Oleg Nesterov15544202013-07-23 17:25:57 +02004108 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004109
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004110 mutex_lock(&trace_types_lock);
4111
Steven Rostedt29bf4a52009-12-09 12:37:43 -05004112 if (iter->trace->pipe_close)
Steven Rostedtc521efd2009-12-07 09:06:24 -05004113 iter->trace->pipe_close(iter);
4114
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004115 mutex_unlock(&trace_types_lock);
4116
Rusty Russell44623442009-01-01 10:12:23 +10304117 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004118 mutex_destroy(&iter->mutex);
4119 kfree(iter->trace);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004120 kfree(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004121
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004122 trace_array_put(tr);
4123
Steven Rostedtb3806b42008-05-12 21:20:46 +02004124 return 0;
4125}
4126
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004127static unsigned int
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004128trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004129{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004130 /* Iterators are static, they should be filled or empty */
4131 if (trace_buffer_iter(iter, iter->cpu_file))
4132 return POLLIN | POLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004133
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004134 if (trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004135 /*
4136 * Always select as readable when in blocking mode
4137 */
4138 return POLLIN | POLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004139 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004140 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004141 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004142}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004143
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004144static unsigned int
4145tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4146{
4147 struct trace_iterator *iter = filp->private_data;
4148
4149 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004150}
4151
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01004152/*
4153 * This is a make-shift waitqueue.
4154 * A tracer might use this callback in some rare cases:
4155 *
4156 * 1) the current tracer might hold the runqueue lock when it wakes up
4157 * a reader, hence a deadlock (sched, function, and function graph tracers)
4158 * 2) the function tracers trace all functions, and we don't want
4159 * the overhead of calling wake_up and friends
4160 * (and tracing them too)
4161 *
4162 * Anyway, this is a really primitive wakeup.
4163 */
4164void poll_wait_pipe(struct trace_iterator *iter)
4165{
4166 set_current_state(TASK_INTERRUPTIBLE);
4167 /* sleep for 100 msecs, and try again. */
4168 schedule_timeout(HZ / 10);
4169}
4170
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004171/* Must be called with trace_types_lock mutex held. */
4172static int tracing_wait_pipe(struct file *filp)
4173{
4174 struct trace_iterator *iter = filp->private_data;
4175
4176 while (trace_empty(iter)) {
4177
4178 if ((filp->f_flags & O_NONBLOCK)) {
4179 return -EAGAIN;
4180 }
4181
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004182 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004183
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01004184 iter->trace->wait_pipe(iter);
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004185
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004186 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004187
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01004188 if (signal_pending(current))
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004189 return -EINTR;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004190
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004191 /*
Liu Bo250bfd32013-01-14 10:54:11 +08004192		 * We block until we have read something and tracing is disabled.
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004193 * We still block if tracing is disabled, but we have never
4194 * read anything. This allows a user to cat this file, and
4195 * then enable tracing. But after we have read something,
4196 * we give an EOF when tracing is again disabled.
4197 *
4198 * iter->pos will be 0 if we haven't read anything.
4199 */
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04004200 if (!tracing_is_on() && iter->pos)
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004201 break;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004202 }
4203
4204 return 1;
4205}
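
/*
 * Userspace sketch (illustration only, not part of this kernel file):
 * a consumer of 'trace_pipe' that relies on the blocking semantics
 * implemented above -- the read blocks while the buffer is empty, and
 * returns 0 (EOF) only after something was read and tracing was
 * disabled again.  The path and buffer size are assumptions.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int drain_trace_pipe(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);

	if (fd < 0)
		return -1;
	for (;;) {
		n = read(fd, buf, sizeof(buf));
		if (n > 0) {
			fwrite(buf, 1, n, stdout);
			continue;
		}
		if (n < 0 && errno == EINTR)
			continue;	/* signal arrived while blocked; retry */
		break;			/* 0 == EOF: tracing was disabled again */
	}
	close(fd);
	return 0;
}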
4206
Steven Rostedtb3806b42008-05-12 21:20:46 +02004207/*
4208 * Consumer reader.
4209 */
4210static ssize_t
4211tracing_read_pipe(struct file *filp, char __user *ubuf,
4212 size_t cnt, loff_t *ppos)
4213{
4214 struct trace_iterator *iter = filp->private_data;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004215 struct trace_array *tr = iter->tr;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004216 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004217
4218 /* return any leftover data */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004219 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4220 if (sret != -EBUSY)
4221 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004222
Steven Rostedtf9520752009-03-02 14:04:40 -05004223 trace_seq_init(&iter->seq);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004224
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004225 /* copy the tracer to avoid using a global lock all around */
Steven Rostedt107bad82008-05-12 21:21:01 +02004226 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004227 if (unlikely(iter->trace->name != tr->current_trace->name))
4228 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004229 mutex_unlock(&trace_types_lock);
4230
4231 /*
4232	 * Avoid more than one consumer on a single file descriptor.
4233	 * This is just a matter of trace coherency; the ring buffer itself
4234 * is protected.
4235 */
4236 mutex_lock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02004237 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004238 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4239 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02004240 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02004241 }
4242
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004243waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004244 sret = tracing_wait_pipe(filp);
4245 if (sret <= 0)
4246 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004247
4248 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004249 if (trace_empty(iter)) {
4250 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02004251 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004252 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004253
4254 if (cnt >= PAGE_SIZE)
4255 cnt = PAGE_SIZE - 1;
4256
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004257 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004258 memset(&iter->seq, 0,
4259 sizeof(struct trace_iterator) -
4260 offsetof(struct trace_iterator, seq));
Andrew Vagined5467d2013-08-02 21:16:43 +04004261 cpumask_clear(iter->started);
Steven Rostedt4823ed72008-05-12 21:21:01 +02004262 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004263
Lai Jiangshan4f535962009-05-18 19:35:34 +08004264 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004265 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05004266 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004267 enum print_line_t ret;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004268 int len = iter->seq.len;
4269
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004270 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004271 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02004272 /* don't print partial lines */
4273 iter->seq.len = len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004274 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004275 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01004276 if (ret != TRACE_TYPE_NO_CONSUME)
4277 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004278
4279 if (iter->seq.len >= cnt)
4280 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01004281
4282 /*
4283 * Setting the full flag means we reached the trace_seq buffer
4284		 * size and we should have left via the partial output condition above.
4285 * One of the trace_seq_* functions is not used properly.
4286 */
4287 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4288 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004289 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004290 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004291 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02004292
Steven Rostedtb3806b42008-05-12 21:20:46 +02004293 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004294 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4295 if (iter->seq.readpos >= iter->seq.len)
Steven Rostedtf9520752009-03-02 14:04:40 -05004296 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004297
4298 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004299	 * If there was nothing to send to the user, in spite of consuming trace
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004300 * entries, go back to wait for more entries.
4301 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004302 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004303 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004304
Steven Rostedt107bad82008-05-12 21:21:01 +02004305out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004306 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02004307
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004308 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004309}
4310
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004311static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4312 unsigned int idx)
4313{
4314 __free_page(spd->pages[idx]);
4315}
4316
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08004317static const struct pipe_buf_operations tracing_pipe_buf_ops = {
Steven Rostedt34cd4992009-02-09 12:06:29 -05004318 .can_merge = 0,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004319 .confirm = generic_pipe_buf_confirm,
Al Viro92fdd982014-01-17 07:53:39 -05004320 .release = generic_pipe_buf_release,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004321 .steal = generic_pipe_buf_steal,
4322 .get = generic_pipe_buf_get,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004323};
4324
Steven Rostedt34cd4992009-02-09 12:06:29 -05004325static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01004326tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004327{
4328 size_t count;
4329 int ret;
4330
4331 /* Seq buffer is page-sized, exactly what we need. */
4332 for (;;) {
4333 count = iter->seq.len;
4334 ret = print_trace_line(iter);
4335 count = iter->seq.len - count;
4336 if (rem < count) {
4337 rem = 0;
4338 iter->seq.len -= count;
4339 break;
4340 }
4341 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4342 iter->seq.len -= count;
4343 break;
4344 }
4345
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08004346 if (ret != TRACE_TYPE_NO_CONSUME)
4347 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05004348 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05004349 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05004350 rem = 0;
4351 iter->ent = NULL;
4352 break;
4353 }
4354 }
4355
4356 return rem;
4357}
4358
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004359static ssize_t tracing_splice_read_pipe(struct file *filp,
4360 loff_t *ppos,
4361 struct pipe_inode_info *pipe,
4362 size_t len,
4363 unsigned int flags)
4364{
Jens Axboe35f3d142010-05-20 10:43:18 +02004365 struct page *pages_def[PIPE_DEF_BUFFERS];
4366 struct partial_page partial_def[PIPE_DEF_BUFFERS];
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004367 struct trace_iterator *iter = filp->private_data;
4368 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02004369 .pages = pages_def,
4370 .partial = partial_def,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004371 .nr_pages = 0, /* This gets updated below. */
Eric Dumazet047fe362012-06-12 15:24:40 +02004372 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004373 .flags = flags,
4374 .ops = &tracing_pipe_buf_ops,
4375 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004376 };
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004377 struct trace_array *tr = iter->tr;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004378 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004379 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004380 unsigned int i;
4381
Jens Axboe35f3d142010-05-20 10:43:18 +02004382 if (splice_grow_spd(pipe, &spd))
4383 return -ENOMEM;
4384
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004385 /* copy the tracer to avoid using a global lock all around */
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004386 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004387 if (unlikely(iter->trace->name != tr->current_trace->name))
4388 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004389 mutex_unlock(&trace_types_lock);
4390
4391 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004392
4393 if (iter->trace->splice_read) {
4394 ret = iter->trace->splice_read(iter, filp,
4395 ppos, pipe, len, flags);
4396 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004397 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004398 }
4399
4400 ret = tracing_wait_pipe(filp);
4401 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004402 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004403
Jason Wessel955b61e2010-08-05 09:22:23 -05004404 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004405 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004406 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004407 }
4408
Lai Jiangshan4f535962009-05-18 19:35:34 +08004409 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004410 trace_access_lock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004411
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004412 /* Fill as many pages as possible. */
Jens Axboe35f3d142010-05-20 10:43:18 +02004413 for (i = 0, rem = len; i < pipe->buffers && rem; i++) {
4414 spd.pages[i] = alloc_page(GFP_KERNEL);
4415 if (!spd.pages[i])
Steven Rostedt34cd4992009-02-09 12:06:29 -05004416 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004417
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01004418 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004419
4420 /* Copy the data into the page, so we can start over. */
4421 ret = trace_seq_to_buffer(&iter->seq,
Jens Axboe35f3d142010-05-20 10:43:18 +02004422 page_address(spd.pages[i]),
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004423 iter->seq.len);
4424 if (ret < 0) {
Jens Axboe35f3d142010-05-20 10:43:18 +02004425 __free_page(spd.pages[i]);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004426 break;
4427 }
Jens Axboe35f3d142010-05-20 10:43:18 +02004428 spd.partial[i].offset = 0;
4429 spd.partial[i].len = iter->seq.len;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004430
Steven Rostedtf9520752009-03-02 14:04:40 -05004431 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004432 }
4433
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004434 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004435 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004436 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004437
4438 spd.nr_pages = i;
4439
Jens Axboe35f3d142010-05-20 10:43:18 +02004440 ret = splice_to_pipe(pipe, &spd);
4441out:
Eric Dumazet047fe362012-06-12 15:24:40 +02004442 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02004443 return ret;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004444
Steven Rostedt34cd4992009-02-09 12:06:29 -05004445out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004446 mutex_unlock(&iter->mutex);
Jens Axboe35f3d142010-05-20 10:43:18 +02004447 goto out;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004448}
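
/*
 * Userspace sketch (illustration only, not part of this kernel file):
 * moving trace data without copying through userspace by splicing
 * 'trace_pipe' into a pipe and then into an output file, which is the
 * path served by tracing_splice_read_pipe() above.  The path and chunk
 * size are assumptions.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static int splice_trace(int out_fd)
{
	int fds[2];
	ssize_t n;
	int fd = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);

	if (fd < 0 || pipe(fds) < 0)
		return -1;
	/* One page per call; the kernel side fills whole pages. */
	while ((n = splice(fd, NULL, fds[1], NULL, 4096, 0)) > 0) {
		if (splice(fds[0], NULL, out_fd, NULL, n, 0) < 0)
			break;
	}
	close(fds[0]);
	close(fds[1]);
	close(fd);
	return n < 0 ? -1 : 0;
}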
4449
Steven Rostedta98a3c32008-05-12 21:20:59 +02004450static ssize_t
4451tracing_entries_read(struct file *filp, char __user *ubuf,
4452 size_t cnt, loff_t *ppos)
4453{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004454 struct inode *inode = file_inode(filp);
4455 struct trace_array *tr = inode->i_private;
4456 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004457 char buf[64];
4458 int r = 0;
4459 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004460
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004461 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004462
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004463 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004464 int cpu, buf_size_same;
4465 unsigned long size;
4466
4467 size = 0;
4468 buf_size_same = 1;
4469		/* check if all cpu sizes are the same */
4470 for_each_tracing_cpu(cpu) {
4471 /* fill in the size from first enabled cpu */
4472 if (size == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004473 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4474 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004475 buf_size_same = 0;
4476 break;
4477 }
4478 }
4479
4480 if (buf_size_same) {
4481 if (!ring_buffer_expanded)
4482 r = sprintf(buf, "%lu (expanded: %lu)\n",
4483 size >> 10,
4484 trace_buf_size >> 10);
4485 else
4486 r = sprintf(buf, "%lu\n", size >> 10);
4487 } else
4488 r = sprintf(buf, "X\n");
4489 } else
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004490 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004491
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004492 mutex_unlock(&trace_types_lock);
4493
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004494 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4495 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004496}
4497
4498static ssize_t
4499tracing_entries_write(struct file *filp, const char __user *ubuf,
4500 size_t cnt, loff_t *ppos)
4501{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004502 struct inode *inode = file_inode(filp);
4503 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004504 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004505 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004506
Peter Huewe22fe9b52011-06-07 21:58:27 +02004507 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4508 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004509 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004510
4511 /* must have at least 1 entry */
4512 if (!val)
4513 return -EINVAL;
4514
Steven Rostedt1696b2b2008-11-13 00:09:35 -05004515 /* value is in KB */
4516 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004517 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004518 if (ret < 0)
4519 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004520
Jiri Olsacf8517c2009-10-23 19:36:16 -04004521 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004522
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004523 return cnt;
4524}
Steven Rostedtbf5e6512008-11-10 21:46:00 -05004525
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004526static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004527tracing_total_entries_read(struct file *filp, char __user *ubuf,
4528 size_t cnt, loff_t *ppos)
4529{
4530 struct trace_array *tr = filp->private_data;
4531 char buf[64];
4532 int r, cpu;
4533 unsigned long size = 0, expanded_size = 0;
4534
4535 mutex_lock(&trace_types_lock);
4536 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004537 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004538 if (!ring_buffer_expanded)
4539 expanded_size += trace_buf_size >> 10;
4540 }
4541 if (ring_buffer_expanded)
4542 r = sprintf(buf, "%lu\n", size);
4543 else
4544 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
4545 mutex_unlock(&trace_types_lock);
4546
4547 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4548}
4549
4550static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004551tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
4552 size_t cnt, loff_t *ppos)
4553{
4554 /*
4555	 * There is no need to read what the user has written; this function
4556	 * is here just to make sure that "echo" does not return an error.
4557 */
4558
4559 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004560
4561 return cnt;
4562}
4563
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004564static int
4565tracing_free_buffer_release(struct inode *inode, struct file *filp)
4566{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004567 struct trace_array *tr = inode->i_private;
4568
Steven Rostedtcf30cf62011-06-14 22:44:07 -04004569	/* disable tracing? */
4570 if (trace_flags & TRACE_ITER_STOP_ON_FREE)
Alexander Z Lam711e1242013-08-02 18:36:15 -07004571 tracer_tracing_off(tr);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004572 /* resize the ring buffer to 0 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004573 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004574
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004575 trace_array_put(tr);
4576
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004577 return 0;
4578}
4579
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004580static ssize_t
4581tracing_mark_write(struct file *filp, const char __user *ubuf,
4582 size_t cnt, loff_t *fpos)
4583{
Steven Rostedtd696b582011-09-22 11:50:27 -04004584 unsigned long addr = (unsigned long)ubuf;
Alexander Z Lam2d716192013-07-01 15:31:24 -07004585 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04004586 struct ring_buffer_event *event;
4587 struct ring_buffer *buffer;
4588 struct print_entry *entry;
4589 unsigned long irq_flags;
4590 struct page *pages[2];
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004591 void *map_page[2];
Steven Rostedtd696b582011-09-22 11:50:27 -04004592 int nr_pages = 1;
4593 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04004594 int offset;
4595 int size;
4596 int len;
4597 int ret;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004598 int i;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004599
Steven Rostedtc76f0692008-11-07 22:36:02 -05004600 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004601 return -EINVAL;
4602
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07004603 if (!(trace_flags & TRACE_ITER_MARKERS))
4604 return -EINVAL;
4605
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004606 if (cnt > TRACE_BUF_SIZE)
4607 cnt = TRACE_BUF_SIZE;
4608
Steven Rostedtd696b582011-09-22 11:50:27 -04004609 /*
4610 * Userspace is injecting traces into the kernel trace buffer.
4611	 * We want to be as non-intrusive as possible.
4612 * To do so, we do not want to allocate any special buffers
4613 * or take any locks, but instead write the userspace data
4614 * straight into the ring buffer.
4615 *
4616 * First we need to pin the userspace buffer into memory,
4617	 * which most likely it already is, because userspace just referenced it.
4618 * But there's no guarantee that it is. By using get_user_pages_fast()
4619 * and kmap_atomic/kunmap_atomic() we can get access to the
4620 * pages directly. We then write the data directly into the
4621 * ring buffer.
4622 */
4623 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004624
Steven Rostedtd696b582011-09-22 11:50:27 -04004625 /* check if we cross pages */
4626 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
4627 nr_pages = 2;
4628
4629 offset = addr & (PAGE_SIZE - 1);
4630 addr &= PAGE_MASK;
4631
4632 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
4633 if (ret < nr_pages) {
4634 while (--ret >= 0)
4635 put_page(pages[ret]);
4636 written = -EFAULT;
4637 goto out;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004638 }
4639
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004640 for (i = 0; i < nr_pages; i++)
4641 map_page[i] = kmap_atomic(pages[i]);
Steven Rostedtd696b582011-09-22 11:50:27 -04004642
4643 local_save_flags(irq_flags);
4644 size = sizeof(*entry) + cnt + 2; /* possible \n added */
Alexander Z Lam2d716192013-07-01 15:31:24 -07004645 buffer = tr->trace_buffer.buffer;
Steven Rostedtd696b582011-09-22 11:50:27 -04004646 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
4647 irq_flags, preempt_count());
4648 if (!event) {
4649 /* Ring buffer disabled, return as if not open for write */
4650 written = -EBADF;
4651 goto out_unlock;
4652 }
4653
4654 entry = ring_buffer_event_data(event);
4655 entry->ip = _THIS_IP_;
4656
4657 if (nr_pages == 2) {
4658 len = PAGE_SIZE - offset;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004659 memcpy(&entry->buf, map_page[0] + offset, len);
4660 memcpy(&entry->buf[len], map_page[1], cnt - len);
Steven Rostedtd696b582011-09-22 11:50:27 -04004661 } else
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004662 memcpy(&entry->buf, map_page[0] + offset, cnt);
Steven Rostedtd696b582011-09-22 11:50:27 -04004663
4664 if (entry->buf[cnt - 1] != '\n') {
4665 entry->buf[cnt] = '\n';
4666 entry->buf[cnt + 1] = '\0';
4667 } else
4668 entry->buf[cnt] = '\0';
4669
Steven Rostedt7ffbd482012-10-11 12:14:25 -04004670 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04004671
4672 written = cnt;
4673
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02004674 *fpos += written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004675
Steven Rostedtd696b582011-09-22 11:50:27 -04004676 out_unlock:
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004677 for (i = 0; i < nr_pages; i++){
4678 kunmap_atomic(map_page[i]);
4679 put_page(pages[i]);
4680 }
Steven Rostedtd696b582011-09-22 11:50:27 -04004681 out:
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02004682 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004683}
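
/*
 * Userspace sketch (illustration only, not part of this kernel file):
 * injecting a marker into the ring buffer through 'trace_marker',
 * which is handled by tracing_mark_write() above.  A single write()
 * suffices; the kernel pins at most two user pages, and writes are
 * limited to TRACE_BUF_SIZE bytes.  The path is an assumption.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int trace_marker_write(const char *msg)
{
	int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);
	int ret;

	if (fd < 0)
		return -1;
	/* The handler appends a '\n' if the message lacks one. */
	ret = write(fd, msg, strlen(msg)) < 0 ? -1 : 0;
	close(fd);
	return ret;
}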
4684
Li Zefan13f16d22009-12-08 11:16:11 +08004685static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08004686{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004687 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08004688 int i;
4689
4690 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08004691 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08004692 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004693 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
4694 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08004695 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08004696
Li Zefan13f16d22009-12-08 11:16:11 +08004697 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08004698}
4699
4700static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
4701 size_t cnt, loff_t *fpos)
4702{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004703 struct seq_file *m = filp->private_data;
4704 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08004705 char buf[64];
4706 const char *clockstr;
4707 int i;
4708
4709 if (cnt >= sizeof(buf))
4710 return -EINVAL;
4711
4712 if (copy_from_user(&buf, ubuf, cnt))
4713 return -EFAULT;
4714
4715 buf[cnt] = 0;
4716
4717 clockstr = strstrip(buf);
4718
4719 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
4720 if (strcmp(trace_clocks[i].name, clockstr) == 0)
4721 break;
4722 }
4723 if (i == ARRAY_SIZE(trace_clocks))
4724 return -EINVAL;
4725
Zhaolei5079f322009-08-25 16:12:56 +08004726 mutex_lock(&trace_types_lock);
4727
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004728 tr->clock_id = i;
4729
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004730 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08004731
David Sharp60303ed2012-10-11 16:27:52 -07004732 /*
4733 * New clock may not be consistent with the previous clock.
4734 * Reset the buffer so that it doesn't have incomparable timestamps.
4735 */
Alexander Z Lam94571582013-08-02 18:36:16 -07004736 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004737
4738#ifdef CONFIG_TRACER_MAX_TRACE
4739 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
4740 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
Alexander Z Lam94571582013-08-02 18:36:16 -07004741 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004742#endif
David Sharp60303ed2012-10-11 16:27:52 -07004743
Zhaolei5079f322009-08-25 16:12:56 +08004744 mutex_unlock(&trace_types_lock);
4745
4746 *fpos += cnt;
4747
4748 return cnt;
4749}
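
/*
 * Userspace sketch (illustration only, not part of this kernel file):
 * the active clock is the bracketed entry printed by
 * tracing_clock_show(), and writing a name through
 * tracing_clock_write() above switches clocks (note that both buffers
 * are reset on a change, so existing trace data is lost).  The path
 * and buffer size are assumptions.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int get_trace_clock(char *name, size_t len)
{
	char buf[256], *start, *end;
	ssize_t n;
	int fd = open("/sys/kernel/debug/tracing/trace_clock", O_RDONLY);

	if (fd < 0)
		return -1;
	n = read(fd, buf, sizeof(buf) - 1);
	close(fd);
	if (n <= 0)
		return -1;
	buf[n] = '\0';
	/* The active clock is the bracketed entry, e.g. "[local] global ..." */
	start = strchr(buf, '[');
	end = start ? strchr(start, ']') : NULL;
	if (!end)
		return -1;
	*end = '\0';
	snprintf(name, len, "%s", start + 1);
	return 0;
}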
4750
Li Zefan13f16d22009-12-08 11:16:11 +08004751static int tracing_clock_open(struct inode *inode, struct file *file)
4752{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004753 struct trace_array *tr = inode->i_private;
4754 int ret;
4755
Li Zefan13f16d22009-12-08 11:16:11 +08004756 if (tracing_disabled)
4757 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004758
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004759 if (trace_array_get(tr))
4760 return -ENODEV;
4761
4762 ret = single_open(file, tracing_clock_show, inode->i_private);
4763 if (ret < 0)
4764 trace_array_put(tr);
4765
4766 return ret;
Li Zefan13f16d22009-12-08 11:16:11 +08004767}
4768
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05004769struct ftrace_buffer_info {
4770 struct trace_iterator iter;
4771 void *spare;
4772 unsigned int read;
4773};
4774
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004775#ifdef CONFIG_TRACER_SNAPSHOT
4776static int tracing_snapshot_open(struct inode *inode, struct file *file)
4777{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004778 struct trace_array *tr = inode->i_private;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004779 struct trace_iterator *iter;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004780 struct seq_file *m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004781 int ret = 0;
4782
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004783 if (trace_array_get(tr) < 0)
4784 return -ENODEV;
4785
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004786 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02004787 iter = __tracing_open(inode, file, true);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004788 if (IS_ERR(iter))
4789 ret = PTR_ERR(iter);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004790 } else {
4791 /* Writes still need the seq_file to hold the private data */
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004792 ret = -ENOMEM;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004793 m = kzalloc(sizeof(*m), GFP_KERNEL);
4794 if (!m)
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004795 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004796 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4797 if (!iter) {
4798 kfree(m);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004799 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004800 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004801 ret = 0;
4802
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004803 iter->tr = tr;
Oleg Nesterov6484c712013-07-23 17:26:10 +02004804 iter->trace_buffer = &tr->max_buffer;
4805 iter->cpu_file = tracing_get_cpu(inode);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004806 m->private = iter;
4807 file->private_data = m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004808 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004809out:
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004810 if (ret < 0)
4811 trace_array_put(tr);
4812
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004813 return ret;
4814}
4815
4816static ssize_t
4817tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
4818 loff_t *ppos)
4819{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004820 struct seq_file *m = filp->private_data;
4821 struct trace_iterator *iter = m->private;
4822 struct trace_array *tr = iter->tr;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004823 unsigned long val;
4824 int ret;
4825
4826 ret = tracing_update_buffers();
4827 if (ret < 0)
4828 return ret;
4829
4830 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4831 if (ret)
4832 return ret;
4833
4834 mutex_lock(&trace_types_lock);
4835
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004836 if (tr->current_trace->use_max_tr) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004837 ret = -EBUSY;
4838 goto out;
4839 }
4840
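	/*
	 * Semantics of the written value: 0 frees the snapshot buffer,
	 * 1 allocates it (if needed) and takes a snapshot by swapping
	 * the buffers, and any other value clears the snapshot contents.
	 */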
4841 switch (val) {
4842 case 0:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004843 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4844 ret = -EINVAL;
4845 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004846 }
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004847 if (tr->allocated_snapshot)
4848 free_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004849 break;
4850 case 1:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004851/* Only allow per-cpu swap if the ring buffer supports it */
4852#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
4853 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4854 ret = -EINVAL;
4855 break;
4856 }
4857#endif
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05004858 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004859 ret = alloc_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004860 if (ret < 0)
4861 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004862 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004863 local_irq_disable();
4864 /* Now, we're going to swap */
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004865 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05004866 update_max_tr(tr, current, smp_processor_id());
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004867 else
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05004868 update_max_tr_single(tr, current, iter->cpu_file);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004869 local_irq_enable();
4870 break;
4871 default:
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05004872 if (tr->allocated_snapshot) {
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004873 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4874 tracing_reset_online_cpus(&tr->max_buffer);
4875 else
4876 tracing_reset(&tr->max_buffer, iter->cpu_file);
4877 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004878 break;
4879 }
4880
4881 if (ret >= 0) {
4882 *ppos += cnt;
4883 ret = cnt;
4884 }
4885out:
4886 mutex_unlock(&trace_types_lock);
4887 return ret;
4888}
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004889
4890static int tracing_snapshot_release(struct inode *inode, struct file *file)
4891{
4892 struct seq_file *m = file->private_data;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004893 int ret;
4894
4895 ret = tracing_release(inode, file);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004896
4897 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004898 return ret;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004899
4900 /* If write only, the seq_file is just a stub */
4901 if (m)
4902 kfree(m->private);
4903 kfree(m);
4904
4905 return 0;
4906}
4907
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05004908static int tracing_buffers_open(struct inode *inode, struct file *filp);
4909static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
4910 size_t count, loff_t *ppos);
4911static int tracing_buffers_release(struct inode *inode, struct file *file);
4912static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
4913 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
4914
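/*
 * snapshot_raw: reuse tracing_buffers_open() but point the iterator
 * at the max (snapshot) buffer, so reads return raw snapshot pages
 * instead of the live trace buffer.
 */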
4915static int snapshot_raw_open(struct inode *inode, struct file *filp)
4916{
4917 struct ftrace_buffer_info *info;
4918 int ret;
4919
4920 ret = tracing_buffers_open(inode, filp);
4921 if (ret < 0)
4922 return ret;
4923
4924 info = filp->private_data;
4925
4926 if (info->iter.trace->use_max_tr) {
4927 tracing_buffers_release(inode, filp);
4928 return -EBUSY;
4929 }
4930
4931 info->iter.snapshot = true;
4932 info->iter.trace_buffer = &info->iter.tr->max_buffer;
4933
4934 return ret;
4935}
4936
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004937#endif /* CONFIG_TRACER_SNAPSHOT */
4938
4939
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004940static const struct file_operations tracing_max_lat_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004941 .open = tracing_open_generic,
4942 .read = tracing_max_lat_read,
4943 .write = tracing_max_lat_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004944 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004945};
4946
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004947static const struct file_operations set_tracer_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004948 .open = tracing_open_generic,
4949 .read = tracing_set_trace_read,
4950 .write = tracing_set_trace_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004951 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004952};
4953
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004954static const struct file_operations tracing_pipe_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004955 .open = tracing_open_pipe,
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004956 .poll = tracing_poll_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004957 .read = tracing_read_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004958 .splice_read = tracing_splice_read_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004959 .release = tracing_release_pipe,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004960 .llseek = no_llseek,
Steven Rostedtb3806b42008-05-12 21:20:46 +02004961};
4962
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004963static const struct file_operations tracing_entries_fops = {
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004964 .open = tracing_open_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02004965 .read = tracing_entries_read,
4966 .write = tracing_entries_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004967 .llseek = generic_file_llseek,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004968 .release = tracing_release_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02004969};
4970
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004971static const struct file_operations tracing_total_entries_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004972 .open = tracing_open_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004973 .read = tracing_total_entries_read,
4974 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004975 .release = tracing_release_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004976};
4977
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004978static const struct file_operations tracing_free_buffer_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004979 .open = tracing_open_generic_tr,
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004980 .write = tracing_free_buffer_write,
4981 .release = tracing_free_buffer_release,
4982};
4983
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004984static const struct file_operations tracing_mark_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004985 .open = tracing_open_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004986 .write = tracing_mark_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004987 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004988 .release = tracing_release_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004989};
4990
Zhaolei5079f322009-08-25 16:12:56 +08004991static const struct file_operations trace_clock_fops = {
Li Zefan13f16d22009-12-08 11:16:11 +08004992 .open = tracing_clock_open,
4993 .read = seq_read,
4994 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004995 .release = tracing_single_release_tr,
Zhaolei5079f322009-08-25 16:12:56 +08004996 .write = tracing_clock_write,
4997};
4998
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004999#ifdef CONFIG_TRACER_SNAPSHOT
5000static const struct file_operations snapshot_fops = {
5001 .open = tracing_snapshot_open,
5002 .read = seq_read,
5003 .write = tracing_snapshot_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05005004 .llseek = tracing_lseek,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005005 .release = tracing_snapshot_release,
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005006};
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005007
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005008static const struct file_operations snapshot_raw_fops = {
5009 .open = snapshot_raw_open,
5010 .read = tracing_buffers_read,
5011 .release = tracing_buffers_release,
5012 .splice_read = tracing_buffers_splice_read,
5013 .llseek = no_llseek,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005014};
5015
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005016#endif /* CONFIG_TRACER_SNAPSHOT */
5017
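/*
 * Open a per-cpu trace_pipe_raw file: take a reference on the
 * trace_array and set up an iterator over the raw ring buffer pages
 * of the cpu encoded in the inode.
 */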
Steven Rostedt2cadf912008-12-01 22:20:19 -05005018static int tracing_buffers_open(struct inode *inode, struct file *filp)
5019{
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005020 struct trace_array *tr = inode->i_private;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005021 struct ftrace_buffer_info *info;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005022 int ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005023
5024 if (tracing_disabled)
5025 return -ENODEV;
5026
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005027 if (trace_array_get(tr) < 0)
5028 return -ENODEV;
5029
Steven Rostedt2cadf912008-12-01 22:20:19 -05005030 info = kzalloc(sizeof(*info), GFP_KERNEL);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005031 if (!info) {
5032 trace_array_put(tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005033 return -ENOMEM;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005034 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05005035
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005036 mutex_lock(&trace_types_lock);
5037
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005038 info->iter.tr = tr;
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005039 info->iter.cpu_file = tracing_get_cpu(inode);
Steven Rostedtb6273442013-02-28 13:44:11 -05005040 info->iter.trace = tr->current_trace;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005041 info->iter.trace_buffer = &tr->trace_buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005042 info->spare = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005043 /* Force reading ring buffer for first read */
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005044 info->read = (unsigned int)-1;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005045
5046 filp->private_data = info;
5047
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005048 mutex_unlock(&trace_types_lock);
5049
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005050 ret = nonseekable_open(inode, filp);
5051 if (ret < 0)
5052 trace_array_put(tr);
5053
5054 return ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005055}
5056
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005057static unsigned int
5058tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5059{
5060 struct ftrace_buffer_info *info = filp->private_data;
5061 struct trace_iterator *iter = &info->iter;
5062
5063 return trace_poll(iter, filp, poll_table);
5064}
5065
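/*
 * Reads hand out whole ring buffer pages: a "spare" page is filled
 * via ring_buffer_read_page() and copied to user space across one or
 * more read() calls, with info->read tracking the position.
 */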
Steven Rostedt2cadf912008-12-01 22:20:19 -05005066static ssize_t
5067tracing_buffers_read(struct file *filp, char __user *ubuf,
5068 size_t count, loff_t *ppos)
5069{
5070 struct ftrace_buffer_info *info = filp->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005071 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005072 ssize_t ret;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005073 ssize_t size;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005074
Steven Rostedt2dc5d122009-03-04 19:10:05 -05005075 if (!count)
5076 return 0;
5077
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005078 mutex_lock(&trace_types_lock);
5079
5080#ifdef CONFIG_TRACER_MAX_TRACE
5081 if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5082 size = -EBUSY;
5083 goto out_unlock;
5084 }
5085#endif
5086
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005087 if (!info->spare)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005088 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5089 iter->cpu_file);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005090 size = -ENOMEM;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005091 if (!info->spare)
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005092 goto out_unlock;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005093
Steven Rostedt2cadf912008-12-01 22:20:19 -05005094 /* Do we have previous read data to read? */
5095 if (info->read < PAGE_SIZE)
5096 goto read;
5097
Steven Rostedtb6273442013-02-28 13:44:11 -05005098 again:
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005099 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005100 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005101 &info->spare,
5102 count,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005103 iter->cpu_file, 0);
5104 trace_access_unlock(iter->cpu_file);
Steven Rostedtb6273442013-02-28 13:44:11 -05005105
5106 if (ret < 0) {
5107 if (trace_empty(iter)) {
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005108 if ((filp->f_flags & O_NONBLOCK)) {
5109 size = -EAGAIN;
5110 goto out_unlock;
5111 }
5112 mutex_unlock(&trace_types_lock);
Steven Rostedtb6273442013-02-28 13:44:11 -05005113 iter->trace->wait_pipe(iter);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005114 mutex_lock(&trace_types_lock);
5115 if (signal_pending(current)) {
5116 size = -EINTR;
5117 goto out_unlock;
5118 }
Steven Rostedtb6273442013-02-28 13:44:11 -05005119 goto again;
5120 }
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005121 size = 0;
5122 goto out_unlock;
Steven Rostedtb6273442013-02-28 13:44:11 -05005123 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05005124
Steven Rostedt436fc282011-10-14 10:44:25 -04005125 info->read = 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05005126 read:
Steven Rostedt2cadf912008-12-01 22:20:19 -05005127 size = PAGE_SIZE - info->read;
5128 if (size > count)
5129 size = count;
5130
5131 ret = copy_to_user(ubuf, info->spare + info->read, size);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005132 if (ret == size) {
5133 size = -EFAULT;
5134 goto out_unlock;
5135 }
Steven Rostedt2dc5d122009-03-04 19:10:05 -05005136 size -= ret;
5137
Steven Rostedt2cadf912008-12-01 22:20:19 -05005138 *ppos += size;
5139 info->read += size;
5140
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005141 out_unlock:
5142 mutex_unlock(&trace_types_lock);
5143
Steven Rostedt2cadf912008-12-01 22:20:19 -05005144 return size;
5145}
5146
5147static int tracing_buffers_release(struct inode *inode, struct file *file)
5148{
5149 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005150 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005151
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005152 mutex_lock(&trace_types_lock);
5153
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005154 __trace_array_put(iter->tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005155
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005156 if (info->spare)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005157 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005158 kfree(info);
5159
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005160 mutex_unlock(&trace_types_lock);
5161
Steven Rostedt2cadf912008-12-01 22:20:19 -05005162 return 0;
5163}
5164
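/*
 * A buffer_ref tracks one ring buffer page handed off to the pipe;
 * "ref" counts the pipe buffers still sharing the page, so the page
 * is freed only when the last reference goes away.
 */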
5165struct buffer_ref {
5166 struct ring_buffer *buffer;
5167 void *page;
5168 int ref;
5169};
5170
5171static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5172 struct pipe_buffer *buf)
5173{
5174 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5175
5176 if (--ref->ref)
5177 return;
5178
5179 ring_buffer_free_read_page(ref->buffer, ref->page);
5180 kfree(ref);
5181 buf->private = 0;
5182}
5183
Steven Rostedt2cadf912008-12-01 22:20:19 -05005184static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5185 struct pipe_buffer *buf)
5186{
5187 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5188
5189 ref->ref++;
5190}
5191
5192/* Pipe buffer operations for a buffer. */
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08005193static const struct pipe_buf_operations buffer_pipe_buf_ops = {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005194 .can_merge = 0,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005195 .confirm = generic_pipe_buf_confirm,
5196 .release = buffer_pipe_buf_release,
Masami Hiramatsud55cb6c2012-08-09 21:31:10 +09005197 .steal = generic_pipe_buf_steal,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005198 .get = buffer_pipe_buf_get,
5199};
5200
5201/*
5202 * Callback from splice_to_pipe(); used to release some pages
5203 * at the end of the spd in case we errored out while filling the pipe.
5204 */
5205static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5206{
5207 struct buffer_ref *ref =
5208 (struct buffer_ref *)spd->partial[i].private;
5209
5210 if (--ref->ref)
5211 return;
5212
5213 ring_buffer_free_read_page(ref->buffer, ref->page);
5214 kfree(ref);
5215 spd->partial[i].private = 0;
5216}
5217
5218static ssize_t
5219tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5220 struct pipe_inode_info *pipe, size_t len,
5221 unsigned int flags)
5222{
5223 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005224 struct trace_iterator *iter = &info->iter;
Jens Axboe35f3d142010-05-20 10:43:18 +02005225 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5226 struct page *pages_def[PIPE_DEF_BUFFERS];
Steven Rostedt2cadf912008-12-01 22:20:19 -05005227 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02005228 .pages = pages_def,
5229 .partial = partial_def,
Eric Dumazet047fe362012-06-12 15:24:40 +02005230 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005231 .flags = flags,
5232 .ops = &buffer_pipe_buf_ops,
5233 .spd_release = buffer_spd_release,
5234 };
5235 struct buffer_ref *ref;
Steven Rostedt93459c62009-04-29 00:23:13 -04005236 int entries, size, i;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005237 ssize_t ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005238
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005239 mutex_lock(&trace_types_lock);
5240
5241#ifdef CONFIG_TRACER_MAX_TRACE
5242 if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5243 ret = -EBUSY;
5244 goto out;
5245 }
5246#endif
5247
5248 if (splice_grow_spd(pipe, &spd)) {
5249 ret = -ENOMEM;
5250 goto out;
5251 }
Jens Axboe35f3d142010-05-20 10:43:18 +02005252
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005253 if (*ppos & (PAGE_SIZE - 1)) {
Jens Axboe35f3d142010-05-20 10:43:18 +02005254 ret = -EINVAL;
5255 goto out;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005256 }
5257
5258 if (len & (PAGE_SIZE - 1)) {
Jens Axboe35f3d142010-05-20 10:43:18 +02005259 if (len < PAGE_SIZE) {
5260 ret = -EINVAL;
5261 goto out;
5262 }
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005263 len &= PAGE_MASK;
5264 }
5265
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005266 again:
5267 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005268 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt93459c62009-04-29 00:23:13 -04005269
Jens Axboe35f3d142010-05-20 10:43:18 +02005270 for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005271 struct page *page;
5272 int r;
5273
5274 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
5275 if (!ref)
5276 break;
5277
Steven Rostedt7267fa62009-04-29 00:16:21 -04005278 ref->ref = 1;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005279 ref->buffer = iter->trace_buffer->buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005280 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005281 if (!ref->page) {
5282 kfree(ref);
5283 break;
5284 }
5285
5286 r = ring_buffer_read_page(ref->buffer, &ref->page,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005287 len, iter->cpu_file, 1);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005288 if (r < 0) {
Vaibhav Nagarnaik7ea59062011-05-03 17:56:42 -07005289 ring_buffer_free_read_page(ref->buffer, ref->page);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005290 kfree(ref);
5291 break;
5292 }
5293
5294 /*
5295 * Zero out any leftover data; this page is going to
5296 * user land.
5297 */
5298 size = ring_buffer_page_len(ref->page);
5299 if (size < PAGE_SIZE)
5300 memset(ref->page + size, 0, PAGE_SIZE - size);
5301
5302 page = virt_to_page(ref->page);
5303
5304 spd.pages[i] = page;
5305 spd.partial[i].len = PAGE_SIZE;
5306 spd.partial[i].offset = 0;
5307 spd.partial[i].private = (unsigned long)ref;
5308 spd.nr_pages++;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005309 *ppos += PAGE_SIZE;
Steven Rostedt93459c62009-04-29 00:23:13 -04005310
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005311 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005312 }
5313
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005314 trace_access_unlock(iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005315 spd.nr_pages = i;
5316
5317 /* did we read anything? */
5318 if (!spd.nr_pages) {
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005319 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005320 ret = -EAGAIN;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005321 goto out;
5322 }
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005323 mutex_unlock(&trace_types_lock);
Steven Rostedtb6273442013-02-28 13:44:11 -05005324 iter->trace->wait_pipe(iter);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005325 mutex_lock(&trace_types_lock);
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005326 if (signal_pending(current)) {
5327 ret = -EINTR;
5328 goto out;
5329 }
5330 goto again;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005331 }
5332
5333 ret = splice_to_pipe(pipe, &spd);
Eric Dumazet047fe362012-06-12 15:24:40 +02005334 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02005335out:
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005336 mutex_unlock(&trace_types_lock);
5337
Steven Rostedt2cadf912008-12-01 22:20:19 -05005338 return ret;
5339}
5340
5341static const struct file_operations tracing_buffers_fops = {
5342 .open = tracing_buffers_open,
5343 .read = tracing_buffers_read,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005344 .poll = tracing_buffers_poll,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005345 .release = tracing_buffers_release,
5346 .splice_read = tracing_buffers_splice_read,
5347 .llseek = no_llseek,
5348};
5349
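/*
 * The per-cpu "stats" file reports entry, overrun, byte and event
 * counts for one cpu's ring buffer, plus the oldest and current
 * timestamps formatted according to the selected trace clock.
 */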
Steven Rostedtc8d77182009-04-29 18:03:45 -04005350static ssize_t
5351tracing_stats_read(struct file *filp, char __user *ubuf,
5352 size_t count, loff_t *ppos)
5353{
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005354 struct inode *inode = file_inode(filp);
5355 struct trace_array *tr = inode->i_private;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005356 struct trace_buffer *trace_buf = &tr->trace_buffer;
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005357 int cpu = tracing_get_cpu(inode);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005358 struct trace_seq *s;
5359 unsigned long cnt;
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005360 unsigned long long t;
5361 unsigned long usec_rem;
Steven Rostedtc8d77182009-04-29 18:03:45 -04005362
Li Zefane4f2d102009-06-15 10:57:28 +08005363 s = kmalloc(sizeof(*s), GFP_KERNEL);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005364 if (!s)
Roel Kluina6463652009-11-11 22:26:35 +01005365 return -ENOMEM;
Steven Rostedtc8d77182009-04-29 18:03:45 -04005366
5367 trace_seq_init(s);
5368
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005369 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005370 trace_seq_printf(s, "entries: %ld\n", cnt);
5371
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005372 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005373 trace_seq_printf(s, "overrun: %ld\n", cnt);
5374
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005375 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005376 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5377
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005378 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005379 trace_seq_printf(s, "bytes: %ld\n", cnt);
5380
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09005381 if (trace_clocks[tr->clock_id].in_ns) {
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005382 /* local or global for trace_clock */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005383 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005384 usec_rem = do_div(t, USEC_PER_SEC);
5385 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5386 t, usec_rem);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005387
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005388 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005389 usec_rem = do_div(t, USEC_PER_SEC);
5390 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5391 } else {
5392 /* counter or tsc mode for trace_clock */
5393 trace_seq_printf(s, "oldest event ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005394 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005395
5396 trace_seq_printf(s, "now ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005397 ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005398 }
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005399
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005400 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
Slava Pestov884bfe82011-07-15 14:23:58 -07005401 trace_seq_printf(s, "dropped events: %ld\n", cnt);
5402
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005403 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
Steven Rostedt (Red Hat)ad964702013-01-29 17:45:49 -05005404 trace_seq_printf(s, "read events: %ld\n", cnt);
5405
Steven Rostedtc8d77182009-04-29 18:03:45 -04005406 count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
5407
5408 kfree(s);
5409
5410 return count;
5411}
5412
5413static const struct file_operations tracing_stats_fops = {
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005414 .open = tracing_open_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04005415 .read = tracing_stats_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005416 .llseek = generic_file_llseek,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005417 .release = tracing_release_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04005418};
5419
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005420#ifdef CONFIG_DYNAMIC_FTRACE
5421
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005422int __weak ftrace_arch_read_dyn_info(char *buf, int size)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005423{
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005424 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005425}
5426
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005427static ssize_t
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005428tracing_read_dyn_info(struct file *filp, char __user *ubuf,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005429 size_t cnt, loff_t *ppos)
5430{
Steven Rostedta26a2a22008-10-31 00:03:22 -04005431 static char ftrace_dyn_info_buffer[1024];
5432 static DEFINE_MUTEX(dyn_info_mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005433 unsigned long *p = filp->private_data;
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005434 char *buf = ftrace_dyn_info_buffer;
Steven Rostedta26a2a22008-10-31 00:03:22 -04005435 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005436 int r;
5437
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005438 mutex_lock(&dyn_info_mutex);
5439 r = sprintf(buf, "%ld ", *p);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005440
Steven Rostedta26a2a22008-10-31 00:03:22 -04005441 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005442 buf[r++] = '\n';
5443
5444 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5445
5446 mutex_unlock(&dyn_info_mutex);
5447
5448 return r;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005449}
5450
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005451static const struct file_operations tracing_dyn_info_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005452 .open = tracing_open_generic,
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005453 .read = tracing_read_dyn_info,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005454 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005455};
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005456#endif /* CONFIG_DYNAMIC_FTRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005457
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005458#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5459static void
5460ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005461{
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005462 tracing_snapshot();
5463}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005464
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005465static void
5466ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5467{
5468	unsigned long *count = (unsigned long *)data;
5469
5470 if (!*count)
5471 return;
5472
5473 if (*count != -1)
5474 (*count)--;
5475
5476 tracing_snapshot();
5477}
5478
5479static int
5480ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5481 struct ftrace_probe_ops *ops, void *data)
5482{
5483 long count = (long)data;
5484
5485 seq_printf(m, "%ps:", (void *)ip);
5486
5487	seq_puts(m, "snapshot");
5488
5489 if (count == -1)
5490		seq_puts(m, ":unlimited\n");
5491 else
5492 seq_printf(m, ":count=%ld\n", count);
5493
5494 return 0;
5495}
5496
5497static struct ftrace_probe_ops snapshot_probe_ops = {
5498 .func = ftrace_snapshot,
5499 .print = ftrace_snapshot_print,
5500};
5501
5502static struct ftrace_probe_ops snapshot_count_probe_ops = {
5503 .func = ftrace_count_snapshot,
5504 .print = ftrace_snapshot_print,
5505};
5506
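/*
 * Parse the "snapshot" command written to set_ftrace_filter, e.g.
 *
 *   echo 'schedule:snapshot:5' > set_ftrace_filter
 *
 * An optional :count limits how many snapshots get taken, and a
 * leading '!' on the glob removes a previously registered probe.
 */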
5507static int
5508ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
5509 char *glob, char *cmd, char *param, int enable)
5510{
5511 struct ftrace_probe_ops *ops;
5512 void *count = (void *)-1;
5513 char *number;
5514 int ret;
5515
5516 /* hash funcs only work with set_ftrace_filter */
5517 if (!enable)
5518 return -EINVAL;
5519
5520 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
5521
5522 if (glob[0] == '!') {
5523 unregister_ftrace_function_probe_func(glob+1, ops);
5524 return 0;
5525 }
5526
5527 if (!param)
5528 goto out_reg;
5529
5530 number = strsep(&param, ":");
5531
5532 if (!strlen(number))
5533 goto out_reg;
5534
5535 /*
5536 * We use the callback data field (which is a pointer)
5537 * as our counter.
5538 */
5539 ret = kstrtoul(number, 0, (unsigned long *)&count);
5540 if (ret)
5541 return ret;
5542
5543 out_reg:
5544 ret = register_ftrace_function_probe(glob, ops, count);
5545
5546 if (ret >= 0)
5547 alloc_snapshot(&global_trace);
5548
5549 return ret < 0 ? ret : 0;
5550}
5551
5552static struct ftrace_func_command ftrace_snapshot_cmd = {
5553 .name = "snapshot",
5554 .func = ftrace_trace_snapshot_callback,
5555};
5556
Tom Zanussi38de93a2013-10-24 08:34:18 -05005557static __init int register_snapshot_cmd(void)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005558{
5559 return register_ftrace_command(&ftrace_snapshot_cmd);
5560}
5561#else
Tom Zanussi38de93a2013-10-24 08:34:18 -05005562static inline __init int register_snapshot_cmd(void) { return 0; }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005563#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005564
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005565struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005566{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005567 if (tr->dir)
5568 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005569
Frederic Weisbecker3e1f60b2009-03-22 23:10:45 +01005570 if (!debugfs_initialized())
5571 return NULL;
5572
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005573 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
5574 tr->dir = debugfs_create_dir("tracing", NULL);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005575
zhangwei(Jovi)687c8782013-03-11 15:13:29 +08005576 if (!tr->dir)
5577 pr_warn_once("Could not create debugfs directory 'tracing'\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005578
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005579 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005580}
5581
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005582struct dentry *tracing_init_dentry(void)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005583{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005584 return tracing_init_dentry_tr(&global_trace);
5585}
5586
5587static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
5588{
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005589 struct dentry *d_tracer;
5590
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005591 if (tr->percpu_dir)
5592 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005593
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005594 d_tracer = tracing_init_dentry_tr(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005595 if (!d_tracer)
5596 return NULL;
5597
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005598 tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005599
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005600 WARN_ONCE(!tr->percpu_dir,
5601 "Could not create debugfs directory 'per_cpu/%d'\n", cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005602
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005603 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005604}
5605
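/*
 * Like trace_create_file(), but stores cpu + 1 in the new inode's
 * i_cdev field so that tracing_get_cpu() can recover the cpu; the
 * + 1 keeps cpu 0 distinct from "no cpu set".
 */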
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005606static struct dentry *
5607trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
5608 void *data, long cpu, const struct file_operations *fops)
5609{
5610 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
5611
5612 if (ret) /* See tracing_get_cpu() */
5613 ret->d_inode->i_cdev = (void *)(cpu + 1);
5614 return ret;
5615}
5616
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005617static void
5618tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005619{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005620 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005621 struct dentry *d_cpu;
Steven Rostedtdd49a382010-10-20 21:51:26 -04005622 char cpu_dir[30]; /* 30 characters should be more than enough */
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005623
Namhyung Kim0a3d7ce2012-04-23 10:11:57 +09005624 if (!d_percpu)
5625 return;
5626
Steven Rostedtdd49a382010-10-20 21:51:26 -04005627 snprintf(cpu_dir, 30, "cpu%ld", cpu);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01005628 d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
5629 if (!d_cpu) {
5630 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
5631 return;
5632 }
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005633
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01005634 /* per cpu trace_pipe */
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005635 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
Oleg Nesterov15544202013-07-23 17:25:57 +02005636 tr, cpu, &tracing_pipe_fops);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005637
5638 /* per cpu trace */
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005639 trace_create_cpu_file("trace", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02005640 tr, cpu, &tracing_fops);
Steven Rostedt7f96f932009-03-13 00:37:42 -04005641
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005642 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005643 tr, cpu, &tracing_buffers_fops);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005644
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005645 trace_create_cpu_file("stats", 0444, d_cpu,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005646 tr, cpu, &tracing_stats_fops);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005647
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005648 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005649 tr, cpu, &tracing_entries_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005650
5651#ifdef CONFIG_TRACER_SNAPSHOT
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005652 trace_create_cpu_file("snapshot", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02005653 tr, cpu, &snapshot_fops);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005654
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005655 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005656 tr, cpu, &snapshot_raw_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005657#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005658}
5659
Steven Rostedt60a11772008-05-12 21:20:44 +02005660#ifdef CONFIG_FTRACE_SELFTEST
5661/* Let selftest have access to static functions in this file */
5662#include "trace_selftest.c"
5663#endif
5664
Steven Rostedt577b7852009-02-26 23:43:05 -05005665struct trace_option_dentry {
5666 struct tracer_opt *opt;
5667 struct tracer_flags *flags;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005668 struct trace_array *tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05005669 struct dentry *entry;
5670};
5671
5672static ssize_t
5673trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
5674 loff_t *ppos)
5675{
5676 struct trace_option_dentry *topt = filp->private_data;
5677 char *buf;
5678
5679 if (topt->flags->val & topt->opt->bit)
5680 buf = "1\n";
5681 else
5682 buf = "0\n";
5683
5684 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5685}
5686
5687static ssize_t
5688trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
5689 loff_t *ppos)
5690{
5691 struct trace_option_dentry *topt = filp->private_data;
5692 unsigned long val;
Steven Rostedt577b7852009-02-26 23:43:05 -05005693 int ret;
5694
Peter Huewe22fe9b52011-06-07 21:58:27 +02005695 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5696 if (ret)
Steven Rostedt577b7852009-02-26 23:43:05 -05005697 return ret;
5698
Li Zefan8d18eaa2009-12-08 11:17:06 +08005699 if (val != 0 && val != 1)
Steven Rostedt577b7852009-02-26 23:43:05 -05005700 return -EINVAL;
Li Zefan8d18eaa2009-12-08 11:17:06 +08005701
5702 if (!!(topt->flags->val & topt->opt->bit) != val) {
5703 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005704 ret = __set_tracer_option(topt->tr->current_trace, topt->flags,
Steven Rostedtc757bea2009-12-21 22:35:16 -05005705 topt->opt, !val);
Li Zefan8d18eaa2009-12-08 11:17:06 +08005706 mutex_unlock(&trace_types_lock);
5707 if (ret)
5708 return ret;
Steven Rostedt577b7852009-02-26 23:43:05 -05005709 }
5710
5711 *ppos += cnt;
5712
5713 return cnt;
5714}
5715
5716
5717static const struct file_operations trace_options_fops = {
5718 .open = tracing_open_generic,
5719 .read = trace_options_read,
5720 .write = trace_options_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005721 .llseek = generic_file_llseek,
Steven Rostedt577b7852009-02-26 23:43:05 -05005722};
5723
Steven Rostedta8259072009-02-26 22:19:12 -05005724static ssize_t
5725trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
5726 loff_t *ppos)
5727{
5728 long index = (long)filp->private_data;
5729 char *buf;
5730
5731 if (trace_flags & (1 << index))
5732 buf = "1\n";
5733 else
5734 buf = "0\n";
5735
5736 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5737}
5738
5739static ssize_t
5740trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
5741 loff_t *ppos)
5742{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005743 struct trace_array *tr = &global_trace;
Steven Rostedta8259072009-02-26 22:19:12 -05005744 long index = (long)filp->private_data;
Steven Rostedta8259072009-02-26 22:19:12 -05005745 unsigned long val;
5746 int ret;
5747
Peter Huewe22fe9b52011-06-07 21:58:27 +02005748 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5749 if (ret)
Steven Rostedta8259072009-02-26 22:19:12 -05005750 return ret;
5751
Zhaoleif2d84b62009-08-07 18:55:48 +08005752 if (val != 0 && val != 1)
Steven Rostedta8259072009-02-26 22:19:12 -05005753 return -EINVAL;
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04005754
5755 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005756 ret = set_tracer_flag(tr, 1 << index, val);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04005757 mutex_unlock(&trace_types_lock);
Steven Rostedta8259072009-02-26 22:19:12 -05005758
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005759 if (ret < 0)
5760 return ret;
5761
Steven Rostedta8259072009-02-26 22:19:12 -05005762 *ppos += cnt;
5763
5764 return cnt;
5765}
5766
Steven Rostedta8259072009-02-26 22:19:12 -05005767static const struct file_operations trace_options_core_fops = {
5768 .open = tracing_open_generic,
5769 .read = trace_options_core_read,
5770 .write = trace_options_core_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005771 .llseek = generic_file_llseek,
Steven Rostedta8259072009-02-26 22:19:12 -05005772};
5773
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005774struct dentry *trace_create_file(const char *name,
Al Virof4ae40a2011-07-24 04:33:43 -04005775 umode_t mode,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005776 struct dentry *parent,
5777 void *data,
5778 const struct file_operations *fops)
5779{
5780 struct dentry *ret;
5781
5782 ret = debugfs_create_file(name, mode, parent, data, fops);
5783 if (!ret)
5784 pr_warning("Could not create debugfs '%s' entry\n", name);
5785
5786 return ret;
5787}
5788
5789
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005790static struct dentry *trace_options_init_dentry(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05005791{
5792 struct dentry *d_tracer;
Steven Rostedta8259072009-02-26 22:19:12 -05005793
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005794 if (tr->options)
5795 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05005796
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005797 d_tracer = tracing_init_dentry_tr(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05005798 if (!d_tracer)
5799 return NULL;
5800
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005801 tr->options = debugfs_create_dir("options", d_tracer);
5802 if (!tr->options) {
Steven Rostedta8259072009-02-26 22:19:12 -05005803 pr_warning("Could not create debugfs directory 'options'\n");
5804 return NULL;
5805 }
5806
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005807 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05005808}
5809
Steven Rostedt577b7852009-02-26 23:43:05 -05005810static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005811create_trace_option_file(struct trace_array *tr,
5812 struct trace_option_dentry *topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05005813 struct tracer_flags *flags,
5814 struct tracer_opt *opt)
5815{
5816 struct dentry *t_options;
Steven Rostedt577b7852009-02-26 23:43:05 -05005817
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005818 t_options = trace_options_init_dentry(tr);
Steven Rostedt577b7852009-02-26 23:43:05 -05005819 if (!t_options)
5820 return;
5821
5822 topt->flags = flags;
5823 topt->opt = opt;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005824 topt->tr = tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05005825
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005826 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05005827 &trace_options_fops);
5828
Steven Rostedt577b7852009-02-26 23:43:05 -05005829}
5830
5831static struct trace_option_dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005832create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
Steven Rostedt577b7852009-02-26 23:43:05 -05005833{
5834 struct trace_option_dentry *topts;
5835 struct tracer_flags *flags;
5836 struct tracer_opt *opts;
5837 int cnt;
5838
5839 if (!tracer)
5840 return NULL;
5841
5842 flags = tracer->flags;
5843
5844 if (!flags || !flags->opts)
5845 return NULL;
5846
5847 opts = flags->opts;
5848
5849 for (cnt = 0; opts[cnt].name; cnt++)
5850 ;
5851
Steven Rostedt0cfe8242009-02-27 10:51:10 -05005852 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
Steven Rostedt577b7852009-02-26 23:43:05 -05005853 if (!topts)
5854 return NULL;
5855
5856 for (cnt = 0; opts[cnt].name; cnt++)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005857 create_trace_option_file(tr, &topts[cnt], flags,
Steven Rostedt577b7852009-02-26 23:43:05 -05005858 &opts[cnt]);
5859
5860 return topts;
5861}
5862
5863static void
5864destroy_trace_option_files(struct trace_option_dentry *topts)
5865{
5866 int cnt;
5867
5868 if (!topts)
5869 return;
5870
5871 for (cnt = 0; topts[cnt].opt; cnt++) {
5872 if (topts[cnt].entry)
5873 debugfs_remove(topts[cnt].entry);
5874 }
5875
5876 kfree(topts);
5877}
5878
Steven Rostedta8259072009-02-26 22:19:12 -05005879static struct dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005880create_trace_option_core_file(struct trace_array *tr,
5881 const char *option, long index)
Steven Rostedta8259072009-02-26 22:19:12 -05005882{
5883 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05005884
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005885 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05005886 if (!t_options)
5887 return NULL;
5888
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005889 return trace_create_file(option, 0644, t_options, (void *)index,
Steven Rostedta8259072009-02-26 22:19:12 -05005890 &trace_options_core_fops);
Steven Rostedta8259072009-02-26 22:19:12 -05005891}
5892
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005893static __init void create_trace_options_dir(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05005894{
5895 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05005896 int i;
5897
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005898 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05005899 if (!t_options)
5900 return;
5901
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005902 for (i = 0; trace_options[i]; i++)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005903 create_trace_option_core_file(tr, trace_options[i], i);
Steven Rostedta8259072009-02-26 22:19:12 -05005904}
5905
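/*
 * The "tracing_on" file: reading reports whether the ring buffer is
 * recording; writing 1 or 0 turns recording on or off and invokes
 * the current tracer's start/stop callbacks.
 */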
Steven Rostedt499e5472012-02-22 15:50:28 -05005906static ssize_t
5907rb_simple_read(struct file *filp, char __user *ubuf,
5908 size_t cnt, loff_t *ppos)
5909{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04005910 struct trace_array *tr = filp->private_data;
Steven Rostedt499e5472012-02-22 15:50:28 -05005911 char buf[64];
5912 int r;
5913
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04005914 r = tracer_tracing_is_on(tr);
Steven Rostedt499e5472012-02-22 15:50:28 -05005915 r = sprintf(buf, "%d\n", r);
5916
5917 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5918}
5919
5920static ssize_t
5921rb_simple_write(struct file *filp, const char __user *ubuf,
5922 size_t cnt, loff_t *ppos)
5923{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04005924 struct trace_array *tr = filp->private_data;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005925 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt499e5472012-02-22 15:50:28 -05005926 unsigned long val;
5927 int ret;
5928
5929 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5930 if (ret)
5931 return ret;
5932
5933 if (buffer) {
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05005934 mutex_lock(&trace_types_lock);
5935 if (val) {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04005936 tracer_tracing_on(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005937 if (tr->current_trace->start)
5938 tr->current_trace->start(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05005939 } else {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04005940 tracer_tracing_off(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005941 if (tr->current_trace->stop)
5942 tr->current_trace->stop(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05005943 }
5944 mutex_unlock(&trace_types_lock);
Steven Rostedt499e5472012-02-22 15:50:28 -05005945 }
5946
5947 (*ppos)++;
5948
5949 return cnt;
5950}
5951
5952static const struct file_operations rb_simple_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005953 .open = tracing_open_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05005954 .read = rb_simple_read,
5955 .write = rb_simple_write,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005956 .release = tracing_release_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05005957 .llseek = default_llseek,
5958};
5959
Steven Rostedt277ba042012-08-03 16:10:49 -04005960struct dentry *trace_instance_dir;
5961
5962static void
5963init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
5964
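/*
 * Allocate one trace_buffer: the ring buffer itself plus the
 * per-cpu trace_array_cpu data that goes with it.
 */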
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05005965static int
5966allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
Steven Rostedt277ba042012-08-03 16:10:49 -04005967{
5968 enum ring_buffer_flags rb_flags;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05005969
5970 rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
5971
Steven Rostedt (Red Hat)dced3412014-01-14 10:19:46 -05005972 buf->tr = tr;
5973
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05005974 buf->buffer = ring_buffer_alloc(size, rb_flags);
5975 if (!buf->buffer)
5976 return -ENOMEM;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05005977
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05005978 buf->data = alloc_percpu(struct trace_array_cpu);
5979 if (!buf->data) {
5980 ring_buffer_free(buf->buffer);
5981 return -ENOMEM;
5982 }
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05005983
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05005984 /* Allocate the first page for all buffers */
5985 set_buffer_entries(&tr->trace_buffer,
5986 ring_buffer_size(tr->trace_buffer.buffer, 0));
5987
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05005988 return 0;
5989}
5990
5991static int allocate_trace_buffers(struct trace_array *tr, int size)
5992{
5993 int ret;
5994
5995 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
5996 if (ret)
5997 return ret;
5998
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05005999#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006000 ret = allocate_trace_buffer(tr, &tr->max_buffer,
6001 allocate_snapshot ? size : 1);
6002 if (WARN_ON(ret)) {
6003 ring_buffer_free(tr->trace_buffer.buffer);
6004 free_percpu(tr->trace_buffer.data);
6005 return -ENOMEM;
6006 }
6007 tr->allocated_snapshot = allocate_snapshot;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006008
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006009 /*
6010 * Only the top level trace array gets its snapshot allocated
6011 * from the kernel command line.
6012 */
6013 allocate_snapshot = false;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006014#endif
6015 return 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006016}
6017
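/*
 * Create a named trace instance: a new trace_array with its own
 * buffers, debugfs directory and event files, added to the
 * ftrace_trace_arrays list under trace_types_lock.
 */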
6018static int new_instance_create(const char *name)
6019{
Steven Rostedt277ba042012-08-03 16:10:49 -04006020 struct trace_array *tr;
6021 int ret;
Steven Rostedt277ba042012-08-03 16:10:49 -04006022
6023 mutex_lock(&trace_types_lock);
6024
6025 ret = -EEXIST;
6026 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6027 if (tr->name && strcmp(tr->name, name) == 0)
6028 goto out_unlock;
6029 }
6030
6031 ret = -ENOMEM;
6032 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
6033 if (!tr)
6034 goto out_unlock;
6035
6036 tr->name = kstrdup(name, GFP_KERNEL);
6037 if (!tr->name)
6038 goto out_free_tr;
6039
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07006040 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6041 goto out_free_tr;
6042
6043 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6044
Steven Rostedt277ba042012-08-03 16:10:49 -04006045 raw_spin_lock_init(&tr->start_lock);
6046
6047 tr->current_trace = &nop_trace;
6048
6049 INIT_LIST_HEAD(&tr->systems);
6050 INIT_LIST_HEAD(&tr->events);
6051
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006052 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
Steven Rostedt277ba042012-08-03 16:10:49 -04006053 goto out_free_tr;
6054
Steven Rostedt277ba042012-08-03 16:10:49 -04006055 tr->dir = debugfs_create_dir(name, trace_instance_dir);
6056 if (!tr->dir)
6057 goto out_free_tr;
6058
6059 ret = event_trace_add_tracer(tr->dir, tr);
Alexander Z Lam609e85a2013-07-10 17:34:34 -07006060 if (ret) {
6061 debugfs_remove_recursive(tr->dir);
Steven Rostedt277ba042012-08-03 16:10:49 -04006062 goto out_free_tr;
Alexander Z Lam609e85a2013-07-10 17:34:34 -07006063 }
Steven Rostedt277ba042012-08-03 16:10:49 -04006064
6065 init_tracer_debugfs(tr, tr->dir);
6066
6067 list_add(&tr->list, &ftrace_trace_arrays);
6068
6069 mutex_unlock(&trace_types_lock);
6070
6071 return 0;
6072
6073 out_free_tr:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006074 if (tr->trace_buffer.buffer)
6075 ring_buffer_free(tr->trace_buffer.buffer);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07006076 free_cpumask_var(tr->tracing_cpumask);
Steven Rostedt277ba042012-08-03 16:10:49 -04006077 kfree(tr->name);
6078 kfree(tr);
6079
6080 out_unlock:
6081 mutex_unlock(&trace_types_lock);
6082
6083 return ret;
6084
6085}
6086
Steven Rostedt0c8916c2012-08-07 16:14:16 -04006087static int instance_delete(const char *name)
6088{
6089 struct trace_array *tr;
6090 int found = 0;
6091 int ret;
6092
6093 mutex_lock(&trace_types_lock);
6094
6095 ret = -ENODEV;
6096 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6097 if (tr->name && strcmp(tr->name, name) == 0) {
6098 found = 1;
6099 break;
6100 }
6101 }
6102 if (!found)
6103 goto out_unlock;
6104
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05006105 ret = -EBUSY;
6106 if (tr->ref)
6107 goto out_unlock;
6108
Steven Rostedt0c8916c2012-08-07 16:14:16 -04006109 list_del(&tr->list);
6110
6111 event_trace_del_tracer(tr);
6112 debugfs_remove_recursive(tr->dir);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006113 free_percpu(tr->trace_buffer.data);
6114 ring_buffer_free(tr->trace_buffer.buffer);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04006115
6116 kfree(tr->name);
6117 kfree(tr);
6118
6119 ret = 0;
6120
6121 out_unlock:
6122 mutex_unlock(&trace_types_lock);
6123
6124 return ret;
6125}
6126
Steven Rostedt277ba042012-08-03 16:10:49 -04006127static int instance_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode)
6128{
6129 struct dentry *parent;
6130 int ret;
6131
6132 /* Paranoid: Make sure the parent is the "instances" directory */
6133 parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
6134 if (WARN_ON_ONCE(parent != trace_instance_dir))
6135 return -ENOENT;
6136
6137 /*
6138 * The inode mutex is locked, but debugfs_create_dir() will also
6139 * take the mutex. As the instances directory can not be destroyed
6140 * or changed in any other way, it is safe to unlock it, and
6141 * let the dentry try. If two users try to make the same dir at
6142 * the same time, then the new_instance_create() will determine the
6143 * winner.
6144 */
6145 mutex_unlock(&inode->i_mutex);
6146
6147 ret = new_instance_create(dentry->d_iname);
6148
6149 mutex_lock(&inode->i_mutex);
6150
6151 return ret;
6152}

static int instance_rmdir(struct inode *inode, struct dentry *dentry)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/* The caller did a dget() on dentry */
	mutex_unlock(&dentry->d_inode->i_mutex);

	/*
	 * The inode mutex is locked, but debugfs_remove_recursive() will
	 * also take the mutex. As the instances directory cannot be
	 * destroyed or changed in any other way, it is safe to unlock it,
	 * and let the dentry try. If two users try to remove the same dir
	 * at the same time, then the instance_delete() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = instance_delete(dentry->d_iname);

	mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
	mutex_lock(&dentry->d_inode->i_mutex);

	return ret;
}

static const struct inode_operations instance_dir_inode_operations = {
	.lookup		= simple_lookup,
	.mkdir		= instance_mkdir,
	.rmdir		= instance_rmdir,
};

static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = debugfs_create_dir("instances", d_tracer);
	if (WARN_ON(!trace_instance_dir))
		return;

	/* Hijack the dir inode operations, to allow mkdir */
	trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
}
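
/*
 * With the mkdir/rmdir hooks in place, instances are managed entirely
 * from user space with ordinary directory operations. A minimal example,
 * assuming debugfs is mounted at its usual /sys/kernel/debug location:
 *
 *	mkdir /sys/kernel/debug/tracing/instances/foo
 *	echo 1 > /sys/kernel/debug/tracing/instances/foo/tracing_on
 *	cat /sys/kernel/debug/tracing/instances/foo/trace
 *	rmdir /sys/kernel/debug/tracing/instances/foo
 */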

static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_debugfs_percpu(tr, cpu);

}
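
/*
 * This runs both for the top-level tracing directory (via
 * tracer_init_debugfs() below, with &global_trace) and for every
 * instance directory, so each instance gets its own copy of these
 * control files, backed by its own trace_array.
 */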

static __init int tracer_init_debugfs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	init_tracer_debugfs(&global_trace, d_tracer);

	trace_create_file("available_tracers", 0444, d_tracer,
			  &global_trace, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  &global_trace, &set_tracer_fops);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			  &tracing_max_latency, &tracing_max_lat_fops);
#endif

	trace_create_file("tracing_thresh", 0644, d_tracer,
			  &tracing_thresh, &tracing_max_lat_fops);

	trace_create_file("README", 0444, d_tracer,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			  NULL, &tracing_saved_cmdlines_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			  &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	create_trace_options_dir(&global_trace);

	return 0;
}

static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call	= trace_panic_handler,
	.next		= NULL,
	.priority	= 150	/* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};
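
/*
 * Both notifiers funnel into ftrace_dump(), and the ftrace_dump_on_oops
 * value they pass doubles as the dump mode: setting it to DUMP_ORIG
 * (e.g. ftrace_dump_on_oops=orig_cpu on the command line) dumps only
 * the buffer of the CPU that oopsed.
 */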

/*
 * The printk buffer is limited to 1024 bytes anyway, and nothing
 * should be printing lines that long; cap what we dump per line
 * well below that.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->len >= TRACE_MAX_PRINT)
		s->len = TRACE_MAX_PRINT;

	/* Should be NUL-terminated already, but we are paranoid. */
	s->buffer[s->len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}
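
/*
 * trace_printk_seq() is how ftrace_dump() below pushes each rendered
 * trace line out through printk(), one seq buffer at a time, resetting
 * the seq for the next entry.
 */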

void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}

void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
	}

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUs to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We print all that we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
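
/*
 * ftrace_dump() is normally reached via the panic/die notifiers above
 * (when ftrace_dump_on_oops is set) or via sysrq-z. Being exported, it
 * can also be called directly from debugging code, e.g.:
 *
 *	ftrace_dump(DUMP_ALL);	/* dump the buffers of all CPUs *​/
 *	ftrace_dump(DUMP_ORIG);	/* dump only the calling CPU's buffer *​/
 */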

__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* Used for event triggers */
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_free_cpumask;

	/* TODO: make the number of buffers hotpluggable with CPUs */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_temp_buffer;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	trace_init_cmdlines();

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	while (trace_boot_options) {
		char *option;

		option = strsep(&trace_boot_options, ",");
		trace_set_options(&global_trace, option);
	}

	register_snapshot_cmd();

	return 0;

out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_free_cpumask:
	free_percpu(global_trace.trace_buffer.data);
#ifdef CONFIG_TRACER_MAX_TRACE
	free_percpu(global_trace.max_buffer.data);
#endif
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The default bootup tracer name is stored in an init section,
	 * which is freed after boot. This function runs as a late
	 * initcall: if the boot tracer was never found and registered,
	 * clear it out to prevent a later registration from accessing
	 * the buffer that is about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);

	default_bootup_tracer = NULL;

	return 0;
}

early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);
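
/*
 * Initcall ordering matters here: the ring buffer must exist before the
 * debugfs files that expose it, so buffer allocation runs at
 * early_initcall time, the debugfs setup at fs_initcall time, and the
 * bootup-tracer cleanup above at late_initcall time, after every
 * built-in tracer has had its chance to register.
 */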