/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will poke into the ring buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring buffer, such as trace_printk, could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int dummy_set_flag(u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

static int tracing_set_tracer(const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);
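
/*
 * Example: booting with "ftrace=function_graph" makes set_cmdline_ftrace()
 * save the tracer name in bootup_tracer_buf and pre-expand the ring
 * buffer, so register_tracer() can start that tracer during boot.
 */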

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
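
/*
 * Example: a bare "ftrace_dump_on_oops" on the command line selects
 * DUMP_ALL (dump every CPU's buffer on an oops), while
 * "ftrace_dump_on_oops=orig_cpu" selects DUMP_ORIG and dumps only the
 * buffer of the CPU that triggered the oops.
 */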

static int __init stop_trace_on_warning(char *str)
{
	__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning=", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);

static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
	return 0;
}
__setup("trace_options=", set_trace_boot_options);
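
/*
 * Example: "trace_options=stacktrace,sym-addr" on the command line is
 * stashed in trace_boot_options_buf and applied once the tracing
 * facility initializes, just as if each option had been echoed into
 * the trace_options file at run time.
 */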

unsigned long long ns2usecs(cycle_t nsec)
{
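	/*
	 * Adding 500 before the divide makes do_div() round to the
	 * nearest microsecond instead of truncating: for example,
	 * ns2usecs(1499) == 1 while ns2usecs(1500) == 2.
	 */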
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array global_trace;

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}

int filter_check_discard(struct ftrace_event_file *file, void *rec,
			 struct ring_buffer *buffer,
			 struct ring_buffer_event *event)
{
	if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(file->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(filter_check_discard);

int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(call_filter_check_discard);

cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384:
 * if a dump on oops happens, not having to wait for all that
 * output is much appreciated. It is configurable at both boot
 * time and run time anyway.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer *trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * Serialize access to the ring buffer.
 *
 * The ring buffer serializes readers, but that is only low level
 * protection. The validity of the events (which are returned by
 * ring_buffer_peek() etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) the page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different cpu
 * ring buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
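
/*
 * A usage sketch of the primitives above (illustrative only): a reader
 * consuming one cpu's buffer takes the per-cpu lock, while passing
 * RING_BUFFER_ALL_CPUS takes the whole-buffer lock exclusively:
 *
 *	trace_access_lock(cpu);
 *	... consume events from that cpu's ring buffer ...
 *	trace_access_unlock(cpu);
 */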

/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;

static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races where it gets disabled while we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip: The address of the caller
 * @str: The constant string to write
 * @size: The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, preempt_count());
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip: The address of the caller
 * @str: The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, preempt_count());
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here! ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it, because
	 * the max_tr ring buffer has some state (e.g. ring->clock) that
	 * we want to preserve.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */
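
/*
 * Example debugfs usage of the snapshot feature (when it is built in):
 *
 *	echo 1 > /sys/kernel/debug/tracing/snapshot	# allocate + snapshot
 *	cat /sys/kernel/debug/tracing/snapshot		# read the snapshot
 *	echo 0 > /sys/kernel/debug/tracing/snapshot	# free the buffer
 *
 * tracing_snapshot_alloc() is the in-kernel equivalent of the first
 * write; tracing_snapshot() only performs the buffer swap.
 */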

static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races where it gets disabled while we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr: the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);
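
/*
 * Example: booting with "trace_buf_size=1M" sizes each CPU's buffer at
 * one megabyte (memparse() accepts the K/M/G suffixes), and
 * "tracing_thresh=100" stores 100 microseconds as 100000 nanoseconds
 * in tracing_thresh.
 */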

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	"record-cmd",
	"overwrite",
	"disable_on_free",
	"irq-info",
	"markers",
	"function-trace",
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;	/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,	"local",	1 },
	{ trace_clock_global,	"global",	1 },
	{ trace_clock_counter,	"counter",	0 },
	{ trace_clock_jiffies,	"uptime",	1 },
	{ trace_clock,		"perf",		1 },
	ARCH_TRACE_CLOCKS
};
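
/*
 * The current clock is selected at run time by name, e.g.:
 *
 *	echo global > /sys/kernel/debug/tracing/trace_clock
 *
 * As a rough guide: "local" is fastest but not guaranteed monotonic
 * across CPUs, "global" is monotonic across CPUs at some extra cost,
 * and "counter" is not a time clock at all, just a strictly ordered
 * atomic count (hence in_ns == 0).
 */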

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
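
/*
 * Example of the parser in action (illustrative): a user doing
 *
 *	echo "func1 func2" > set_ftrace_filter
 *
 * may reach trace_get_user() in one or several write() calls; each
 * call returns one whitespace-delimited token ("func1", then "func2")
 * in parser->buffer, with parser->cont set when a token was split
 * across writes and the next call must continue it.
 */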

ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
	int len;
	int ret;

	if (!cnt)
		return 0;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
	if (ret == cnt)
		return -EFAULT;

	cnt -= ret;

	s->readpos += cnt;
	return cnt;
}

static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->readpos, cnt);

	s->readpos += cnt;
	return cnt;
}

/*
 * ftrace_max_lock is used to protect the swapping of buffers
 * when taking a max snapshot. The buffers themselves are
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
 * This is defined as an arch_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 *
 * It is also used in other places outside the update_max_tr
 * so it needs to be defined outside of the
 * CONFIG_TRACER_MAX_TRACE.
 */
static arch_spinlock_t ftrace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

unsigned long __read_mostly tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
unsigned long __read_mostly tracing_max_latency;

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tracing_max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&ftrace_max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&ftrace_max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static void default_wait_pipe(struct trace_iterator *iter)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return;

	ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;
	if (!type->wait_pipe)
		type->wait_pipe = default_wait_pipe;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}
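
/*
 * A minimal registration sketch (the my_tracer names are illustrative,
 * not from this file):
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "my_tracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 *	static __init int init_my_tracer(void)
 *	{
 *		return register_tracer(&my_tracer);
 *	}
 *	core_initcall(init_my_tracer);
 */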
1245
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001246void tracing_reset(struct trace_buffer *buf, int cpu)
Steven Rostedtf6339032009-09-04 12:35:16 -04001247{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001248 struct ring_buffer *buffer = buf->buffer;
Steven Rostedtf6339032009-09-04 12:35:16 -04001249
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001250 if (!buffer)
1251 return;
1252
Steven Rostedtf6339032009-09-04 12:35:16 -04001253 ring_buffer_record_disable(buffer);
1254
1255 /* Make sure all commits have finished */
1256 synchronize_sched();
Steven Rostedt68179682012-05-08 20:57:53 -04001257 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedtf6339032009-09-04 12:35:16 -04001258
1259 ring_buffer_record_enable(buffer);
1260}
1261
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001262void tracing_reset_online_cpus(struct trace_buffer *buf)
Pekka J Enberg213cc062008-12-19 12:08:39 +02001263{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001264 struct ring_buffer *buffer = buf->buffer;
Pekka J Enberg213cc062008-12-19 12:08:39 +02001265 int cpu;
1266
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001267 if (!buffer)
1268 return;
1269
Steven Rostedt621968c2009-09-04 12:02:35 -04001270 ring_buffer_record_disable(buffer);
1271
1272 /* Make sure all commits have finished */
1273 synchronize_sched();
1274
Alexander Z Lam94571582013-08-02 18:36:16 -07001275 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001276
1277 for_each_online_cpu(cpu)
Steven Rostedt68179682012-05-08 20:57:53 -04001278 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedt621968c2009-09-04 12:02:35 -04001279
1280 ring_buffer_record_enable(buffer);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001281}
1282
Steven Rostedt (Red Hat)09d80912013-07-23 22:21:59 -04001283/* Must have trace_types_lock held */
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001284void tracing_reset_all_online_cpus(void)
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001285{
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001286 struct trace_array *tr;
1287
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001288 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001289 tracing_reset_online_cpus(&tr->trace_buffer);
1290#ifdef CONFIG_TRACER_MAX_TRACE
1291 tracing_reset_online_cpus(&tr->max_buffer);
1292#endif
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001293 }
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001294}
1295
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001296#define SAVED_CMDLINES 128
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001297#define NO_CMDLINE_MAP UINT_MAX
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001298static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1299static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
1300static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
1301static int cmdline_idx;
Thomas Gleixneredc35bd2009-12-03 12:38:57 +01001302static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
Steven Rostedt25b0b442008-05-12 21:21:00 +02001303
Steven Rostedt25b0b442008-05-12 21:21:00 +02001304/* temporary disable recording */
Hannes Eder4fd27352009-02-10 19:44:12 +01001305static atomic_t trace_record_cmdline_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001306
1307static void trace_init_cmdlines(void)
1308{
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001309 memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
1310 memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001311 cmdline_idx = 0;
1312}
1313
Carsten Emdeb5130b12009-09-13 01:43:07 +02001314int is_tracing_stopped(void)
1315{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001316 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02001317}
1318
Steven Rostedt0f048702008-11-05 16:05:44 -05001319/**
1320 * tracing_start - quick start of the tracer
1321 *
1322 * If tracing is enabled but was stopped by tracing_stop,
1323 * this will start the tracer back up.
1324 */
1325void tracing_start(void)
1326{
1327 struct ring_buffer *buffer;
1328 unsigned long flags;
1329
1330 if (tracing_disabled)
1331 return;
1332
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001333 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1334 if (--global_trace.stop_count) {
1335 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05001336 /* Someone screwed up their debugging */
1337 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001338 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05001339 }
Steven Rostedt0f048702008-11-05 16:05:44 -05001340 goto out;
1341 }
1342
Steven Rostedta2f80712010-03-12 19:56:00 -05001343 /* Prevent the buffers from switching */
1344 arch_spin_lock(&ftrace_max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05001345
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001346 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001347 if (buffer)
1348 ring_buffer_record_enable(buffer);
1349
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001350#ifdef CONFIG_TRACER_MAX_TRACE
1351 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001352 if (buffer)
1353 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001354#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001355
Steven Rostedta2f80712010-03-12 19:56:00 -05001356 arch_spin_unlock(&ftrace_max_lock);
1357
Steven Rostedt0f048702008-11-05 16:05:44 -05001358 ftrace_start();
1359 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001360 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1361}
1362
1363static void tracing_start_tr(struct trace_array *tr)
1364{
1365 struct ring_buffer *buffer;
1366 unsigned long flags;
1367
1368 if (tracing_disabled)
1369 return;
1370
1371 /* If global, we need to also start the max tracer */
1372 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1373 return tracing_start();
1374
1375 raw_spin_lock_irqsave(&tr->start_lock, flags);
1376
1377 if (--tr->stop_count) {
1378 if (tr->stop_count < 0) {
1379 /* Someone screwed up their debugging */
1380 WARN_ON_ONCE(1);
1381 tr->stop_count = 0;
1382 }
1383 goto out;
1384 }
1385
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001386 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001387 if (buffer)
1388 ring_buffer_record_enable(buffer);
1389
1390 out:
1391 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001392}
1393
1394/**
1395 * tracing_stop - quick stop of the tracer
1396 *
1397 * Light weight way to stop tracing. Use in conjunction with
1398 * tracing_start.
1399 */
1400void tracing_stop(void)
1401{
1402 struct ring_buffer *buffer;
1403 unsigned long flags;
1404
1405 ftrace_stop();
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001406 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1407 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05001408 goto out;
1409
Steven Rostedta2f80712010-03-12 19:56:00 -05001410 /* Prevent the buffers from switching */
1411 arch_spin_lock(&ftrace_max_lock);
1412
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001413 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001414 if (buffer)
1415 ring_buffer_record_disable(buffer);
1416
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001417#ifdef CONFIG_TRACER_MAX_TRACE
1418 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001419 if (buffer)
1420 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001421#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001422
Steven Rostedta2f80712010-03-12 19:56:00 -05001423 arch_spin_unlock(&ftrace_max_lock);
1424
Steven Rostedt0f048702008-11-05 16:05:44 -05001425 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001426 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1427}
1428
1429static void tracing_stop_tr(struct trace_array *tr)
1430{
1431 struct ring_buffer *buffer;
1432 unsigned long flags;
1433
1434 /* If global, we need to also stop the max tracer */
1435 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1436 return tracing_stop();
1437
1438 raw_spin_lock_irqsave(&tr->start_lock, flags);
1439 if (tr->stop_count++)
1440 goto out;
1441
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001442 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001443 if (buffer)
1444 ring_buffer_record_disable(buffer);
1445
1446 out:
1447 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001448}
1449
Ingo Molnare309b412008-05-12 21:20:51 +02001450void trace_stop_cmdline_recording(void);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001451
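/*
 * Remember @tsk->comm for @tsk->pid in the saved_cmdlines cache.
 * This is best effort only: if the cmdline lock is contended the
 * update is simply skipped, since it runs in hot tracing paths.
 */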
Ingo Molnare309b412008-05-12 21:20:51 +02001452static void trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001453{
Carsten Emdea635cf02009-03-18 09:00:41 +01001454 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001455
1456 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1457 return;
1458
1459 /*
1460 * It's not the end of the world if we don't get
1461 * the lock, but we also don't want to spin
1462 * nor do we want to disable interrupts,
1463 * so if we miss here, then better luck next time.
1464 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001465 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001466 return;
1467
1468 idx = map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001469 if (idx == NO_CMDLINE_MAP) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001470 idx = (cmdline_idx + 1) % SAVED_CMDLINES;
1471
Carsten Emdea635cf02009-03-18 09:00:41 +01001472 /*
1473 * Check whether the cmdline buffer at idx has a pid
1474 * mapped. We are going to overwrite that entry so we
1475 * need to clear the map_pid_to_cmdline. Otherwise we
1476 * would read the new comm for the old pid.
1477 */
1478 pid = map_cmdline_to_pid[idx];
1479 if (pid != NO_CMDLINE_MAP)
1480 map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001481
Carsten Emdea635cf02009-03-18 09:00:41 +01001482 map_cmdline_to_pid[idx] = tsk->pid;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001483 map_pid_to_cmdline[tsk->pid] = idx;
1484
1485 cmdline_idx = idx;
1486 }
1487
1488 memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
1489
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001490 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001491}
1492
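/*
 * Look up the comm last saved for @pid: "<idle>" for pid 0, "<XXX>"
 * (plus a warning) for a negative pid, and "<...>" when the pid is
 * out of range or was never recorded in the saved_cmdlines cache.
 */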
Steven Rostedt4ca53082009-03-16 19:20:15 -04001493void trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001494{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001495 unsigned map;
1496
Steven Rostedt4ca53082009-03-16 19:20:15 -04001497 if (!pid) {
1498 strcpy(comm, "<idle>");
1499 return;
1500 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001501
Steven Rostedt74bf4072010-01-25 15:11:53 -05001502 if (WARN_ON_ONCE(pid < 0)) {
1503 strcpy(comm, "<XXX>");
1504 return;
1505 }
1506
Steven Rostedt4ca53082009-03-16 19:20:15 -04001507 if (pid > PID_MAX_DEFAULT) {
1508 strcpy(comm, "<...>");
1509 return;
1510 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001511
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001512 preempt_disable();
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001513 arch_spin_lock(&trace_cmdline_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001514 map = map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001515 if (map != NO_CMDLINE_MAP)
1516 strcpy(comm, saved_cmdlines[map]);
1517 else
1518 strcpy(comm, "<...>");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001519
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001520 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001521 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001522}
1523
Ingo Molnare309b412008-05-12 21:20:51 +02001524void tracing_record_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001525{
Steven Rostedt0fb96562012-05-11 14:25:30 -04001526 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001527 return;
1528
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001529 if (!__this_cpu_read(trace_cmdline_save))
1530 return;
1531
1532 __this_cpu_write(trace_cmdline_save, false);
1533
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001534 trace_save_cmdline(tsk);
1535}
1536
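/*
 * Fill in the fields common to every trace entry: the pid of the
 * current task, the preempt count, and a flags byte encoding the
 * irq/softirq/need-resched state derived from @flags and @pc.
 */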
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03001537void
Steven Rostedt38697052008-10-01 13:14:09 -04001538tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1539 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001540{
1541 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001542
Steven Rostedt777e2082008-09-29 23:02:42 -04001543 entry->preempt_count = pc & 0xff;
1544 entry->pid = (tsk) ? tsk->pid : 0;
1545 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04001546#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04001547 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04001548#else
1549 TRACE_FLAG_IRQS_NOSUPPORT |
1550#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001551 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1552 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02001553 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1554 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001555}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02001556EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001557
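/*
 * Reserve @len bytes in @buffer and pre-fill the generic entry
 * fields.  On success the caller fills in the type-specific data and
 * commits (typically via __buffer_unlock_commit()); on failure (the
 * buffer is disabled or full) NULL is returned and there is nothing
 * to undo.
 */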
Steven Rostedte77405a2009-09-02 14:17:06 -04001558struct ring_buffer_event *
1559trace_buffer_lock_reserve(struct ring_buffer *buffer,
1560 int type,
1561 unsigned long len,
1562 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001563{
1564 struct ring_buffer_event *event;
1565
Steven Rostedte77405a2009-09-02 14:17:06 -04001566 event = ring_buffer_lock_reserve(buffer, len);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001567 if (event != NULL) {
1568 struct trace_entry *ent = ring_buffer_event_data(event);
1569
1570 tracing_generic_entry_update(ent, flags, pc);
1571 ent->type = type;
1572 }
1573
1574 return event;
1575}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001576
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001577void
1578__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1579{
1580 __this_cpu_write(trace_cmdline_save, true);
1581 ring_buffer_unlock_commit(buffer, event);
1582}
1583
Steven Rostedte77405a2009-09-02 14:17:06 -04001584static inline void
1585__trace_buffer_unlock_commit(struct ring_buffer *buffer,
1586 struct ring_buffer_event *event,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001587 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001588{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001589 __buffer_unlock_commit(buffer, event);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001590
Steven Rostedte77405a2009-09-02 14:17:06 -04001591 ftrace_trace_stack(buffer, flags, 6, pc);
1592 ftrace_trace_userstack(buffer, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001593}
1594
Steven Rostedte77405a2009-09-02 14:17:06 -04001595void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1596 struct ring_buffer_event *event,
1597 unsigned long flags, int pc)
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001598{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001599 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001600}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001601EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001602
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001603static struct ring_buffer *temp_buffer;
1604
Steven Rostedtef5580d2009-02-27 19:38:04 -05001605struct ring_buffer_event *
Steven Rostedtccb469a2012-08-02 10:32:10 -04001606trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1607 struct ftrace_event_file *ftrace_file,
1608 int type, unsigned long len,
1609 unsigned long flags, int pc)
1610{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001611 struct ring_buffer_event *entry;
1612
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001613 *current_rb = ftrace_file->tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001614 entry = trace_buffer_lock_reserve(*current_rb,
Steven Rostedtccb469a2012-08-02 10:32:10 -04001615 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001616 /*
1617 * If tracing is off, but we have triggers enabled
1618 * we still need to look at the event data. Use the temp_buffer
1619 * to store the trace event for the trigger to use. It's recursion
1620 * safe and will not be recorded anywhere.
1621 */
1622 if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
1623 *current_rb = temp_buffer;
1624 entry = trace_buffer_lock_reserve(*current_rb,
1625 type, len, flags, pc);
1626 }
1627 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04001628}
1629EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1630
1631struct ring_buffer_event *
Steven Rostedte77405a2009-09-02 14:17:06 -04001632trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1633 int type, unsigned long len,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001634 unsigned long flags, int pc)
1635{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001636 *current_rb = global_trace.trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04001637 return trace_buffer_lock_reserve(*current_rb,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001638 type, len, flags, pc);
1639}
Steven Rostedt94487d62009-05-05 19:22:53 -04001640EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001641
Steven Rostedte77405a2009-09-02 14:17:06 -04001642void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1643 struct ring_buffer_event *event,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001644 unsigned long flags, int pc)
1645{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001646 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001647}
Steven Rostedt94487d62009-05-05 19:22:53 -04001648EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001649
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001650void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1651 struct ring_buffer_event *event,
1652 unsigned long flags, int pc,
1653 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001654{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001655 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001656
1657 ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1658 ftrace_trace_userstack(buffer, flags, pc);
1659}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001660EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001661
Steven Rostedte77405a2009-09-02 14:17:06 -04001662void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1663 struct ring_buffer_event *event)
Steven Rostedt77d9f462009-04-02 01:16:59 -04001664{
Steven Rostedte77405a2009-09-02 14:17:06 -04001665 ring_buffer_discard_commit(buffer, event);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001666}
Steven Rostedt12acd472009-04-17 16:01:56 -04001667EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001668
Ingo Molnare309b412008-05-12 21:20:51 +02001669void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001670trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04001671 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1672 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001673{
Tom Zanussie1112b42009-03-31 00:48:49 -05001674 struct ftrace_event_call *call = &event_function;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001675 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001676 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001677 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001678
Steven Rostedtd7690412008-10-01 00:29:53 -04001679 /* If we are reading the ring buffer, don't trace */
Rusty Russelldd17c8f2009-10-29 22:34:15 +09001680 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
Steven Rostedtd7690412008-10-01 00:29:53 -04001681 return;
1682
Steven Rostedte77405a2009-09-02 14:17:06 -04001683 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001684 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001685 if (!event)
1686 return;
1687 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04001688 entry->ip = ip;
1689 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05001690
Tom Zanussif306cc82013-10-24 08:34:17 -05001691 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001692 __buffer_unlock_commit(buffer, event);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001693}
1694
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001695#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001696
1697#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1698struct ftrace_stack {
1699 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1700};
1701
1702static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1703static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1704
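/*
 * Capture a kernel stack trace into the ring buffer.  The trace is
 * first taken into the per-cpu ftrace_stack scratch area, so a deep
 * stack does not have to be truncated up front; if that scratch area
 * is already in use by a context we interrupted, the trace is saved
 * directly into the event with the default number of entries.
 */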
Steven Rostedte77405a2009-09-02 14:17:06 -04001705static void __ftrace_trace_stack(struct ring_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05001706 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001707 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02001708{
Tom Zanussie1112b42009-03-31 00:48:49 -05001709 struct ftrace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001710 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001711 struct stack_entry *entry;
Ingo Molnar86387f72008-05-12 21:20:51 +02001712 struct stack_trace trace;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001713 int use_stack;
1714 int size = FTRACE_STACK_ENTRIES;
Ingo Molnar86387f72008-05-12 21:20:51 +02001715
1716 trace.nr_entries = 0;
Ingo Molnar86387f72008-05-12 21:20:51 +02001717 trace.skip = skip;
Ingo Molnar86387f72008-05-12 21:20:51 +02001718
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001719 /*
1720 * Since events can happen in NMIs there's no safe way to
1721 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1722 * or NMI comes in, it will just have to use the default
1723 * FTRACE_STACK_ENTRIES.
1724 */
1725 preempt_disable_notrace();
1726
Shan Wei82146522012-11-19 13:21:01 +08001727 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001728 /*
1729 * We don't need any atomic variables, just a barrier.
1730 * If an interrupt comes in, we don't care, because it would
1731 * have exited and put the counter back to what we want.
1732 * We just need a barrier to keep gcc from moving things
1733 * around.
1734 */
1735 barrier();
1736 if (use_stack == 1) {
1737 trace.entries = &__get_cpu_var(ftrace_stack).calls[0];
1738 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1739
1740 if (regs)
1741 save_stack_trace_regs(regs, &trace);
1742 else
1743 save_stack_trace(&trace);
1744
1745 if (trace.nr_entries > size)
1746 size = trace.nr_entries;
1747 } else
1748 /* From now on, use_stack is a boolean */
1749 use_stack = 0;
1750
1751 size *= sizeof(unsigned long);
1752
1753 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1754 sizeof(*entry) + size, flags, pc);
1755 if (!event)
1756 goto out;
1757 entry = ring_buffer_event_data(event);
1758
1759 memset(&entry->caller, 0, size);
1760
1761 if (use_stack)
1762 memcpy(&entry->caller, trace.entries,
1763 trace.nr_entries * sizeof(unsigned long));
1764 else {
1765 trace.max_entries = FTRACE_STACK_ENTRIES;
1766 trace.entries = entry->caller;
1767 if (regs)
1768 save_stack_trace_regs(regs, &trace);
1769 else
1770 save_stack_trace(&trace);
1771 }
1772
1773 entry->size = trace.nr_entries;
1774
Tom Zanussif306cc82013-10-24 08:34:17 -05001775 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001776 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001777
1778 out:
1779 /* Again, don't let gcc optimize things here */
1780 barrier();
Shan Wei82146522012-11-19 13:21:01 +08001781 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001782 preempt_enable_notrace();
1783
Ingo Molnarf0a920d2008-05-12 21:20:47 +02001784}
1785
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001786void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1787 int skip, int pc, struct pt_regs *regs)
1788{
1789 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1790 return;
1791
1792 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1793}
1794
Steven Rostedte77405a2009-09-02 14:17:06 -04001795void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1796 int skip, int pc)
Steven Rostedt53614992009-01-15 19:12:40 -05001797{
1798 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1799 return;
1800
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001801 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
Steven Rostedt53614992009-01-15 19:12:40 -05001802}
1803
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001804void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1805 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04001806{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001807 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
Steven Rostedt38697052008-10-01 13:14:09 -04001808}
1809
Steven Rostedt03889382009-12-11 09:48:22 -05001810/**
1811 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001812 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05001813 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001814void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05001815{
1816 unsigned long flags;
1817
1818 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05001819 return;
Steven Rostedt03889382009-12-11 09:48:22 -05001820
1821 local_save_flags(flags);
1822
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001823 /*
1824 * Skip 3 more, seems to get us at the caller of
1825 * this function.
1826 */
1827 skip += 3;
1828 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1829 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05001830}
1831
Steven Rostedt91e86e52010-11-10 12:56:12 +01001832static DEFINE_PER_CPU(int, user_stack_count);
1833
Steven Rostedte77405a2009-09-02 14:17:06 -04001834void
1835ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02001836{
Tom Zanussie1112b42009-03-31 00:48:49 -05001837 struct ftrace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02001838 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02001839 struct userstack_entry *entry;
1840 struct stack_trace trace;
Török Edwin02b67512008-11-22 13:28:47 +02001841
1842 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1843 return;
1844
Steven Rostedtb6345872010-03-12 20:03:30 -05001845 /*
1846 * NMIs cannot handle page faults, even with fixups.
1847 * Saving the user stack can (and often does) fault.
1848 */
1849 if (unlikely(in_nmi()))
1850 return;
1851
Steven Rostedt91e86e52010-11-10 12:56:12 +01001852 /*
1853 * prevent recursion, since the user stack tracing may
1854 * trigger other kernel events.
1855 */
1856 preempt_disable();
1857 if (__this_cpu_read(user_stack_count))
1858 goto out;
1859
1860 __this_cpu_inc(user_stack_count);
1861
Steven Rostedte77405a2009-09-02 14:17:06 -04001862 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001863 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02001864 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08001865 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02001866 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02001867
Steven Rostedt48659d32009-09-11 11:36:23 -04001868 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02001869 memset(&entry->caller, 0, sizeof(entry->caller));
1870
1871 trace.nr_entries = 0;
1872 trace.max_entries = FTRACE_STACK_ENTRIES;
1873 trace.skip = 0;
1874 trace.entries = entry->caller;
1875
1876 save_stack_trace_user(&trace);
Tom Zanussif306cc82013-10-24 08:34:17 -05001877 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001878 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001879
Li Zefan1dbd1952010-12-09 15:47:56 +08001880 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01001881 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001882 out:
1883 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02001884}
1885
Hannes Eder4fd27352009-02-10 19:44:12 +01001886#ifdef UNUSED
1887static void __trace_userstack(struct trace_array *tr, unsigned long flags)
Török Edwin02b67512008-11-22 13:28:47 +02001888{
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001889 ftrace_trace_userstack(tr, flags, preempt_count());
Török Edwin02b67512008-11-22 13:28:47 +02001890}
Hannes Eder4fd27352009-02-10 19:44:12 +01001891#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02001892
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001893#endif /* CONFIG_STACKTRACE */
1894
Steven Rostedt07d777f2011-09-22 14:01:55 -04001895/* created for use with alloc_percpu */
1896struct trace_buffer_struct {
1897 char buffer[TRACE_BUF_SIZE];
1898};
1899
1900static struct trace_buffer_struct *trace_percpu_buffer;
1901static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1902static struct trace_buffer_struct *trace_percpu_irq_buffer;
1903static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1904
1905/*
1906 * The buffer used is dependent on the context. There is a per cpu
1907 * buffer for normal context, softirq context, hard irq context and
1908 * for NMI context. This allows for lockless recording.
1909 *
1910 * Note, if the buffers failed to be allocated, then this returns NULL
1911 */
1912static char *get_trace_buf(void)
1913{
1914 struct trace_buffer_struct *percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04001915
1916 /*
1917 * If we have allocated per cpu buffers, then we do not
1918 * need to do any locking.
1919 */
1920 if (in_nmi())
1921 percpu_buffer = trace_percpu_nmi_buffer;
1922 else if (in_irq())
1923 percpu_buffer = trace_percpu_irq_buffer;
1924 else if (in_softirq())
1925 percpu_buffer = trace_percpu_sirq_buffer;
1926 else
1927 percpu_buffer = trace_percpu_buffer;
1928
1929 if (!percpu_buffer)
1930 return NULL;
1931
Shan Weid8a03492012-11-13 09:53:04 +08001932 return this_cpu_ptr(&percpu_buffer->buffer[0]);
Steven Rostedt07d777f2011-09-22 14:01:55 -04001933}
1934
1935static int alloc_percpu_trace_buffer(void)
1936{
1937 struct trace_buffer_struct *buffers;
1938 struct trace_buffer_struct *sirq_buffers;
1939 struct trace_buffer_struct *irq_buffers;
1940 struct trace_buffer_struct *nmi_buffers;
1941
1942 buffers = alloc_percpu(struct trace_buffer_struct);
1943 if (!buffers)
1944 goto err_warn;
1945
1946 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1947 if (!sirq_buffers)
1948 goto err_sirq;
1949
1950 irq_buffers = alloc_percpu(struct trace_buffer_struct);
1951 if (!irq_buffers)
1952 goto err_irq;
1953
1954 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
1955 if (!nmi_buffers)
1956 goto err_nmi;
1957
1958 trace_percpu_buffer = buffers;
1959 trace_percpu_sirq_buffer = sirq_buffers;
1960 trace_percpu_irq_buffer = irq_buffers;
1961 trace_percpu_nmi_buffer = nmi_buffers;
1962
1963 return 0;
1964
1965 err_nmi:
1966 free_percpu(irq_buffers);
1967 err_irq:
1968 free_percpu(sirq_buffers);
1969 err_sirq:
1970 free_percpu(buffers);
1971 err_warn:
1972 WARN(1, "Could not allocate percpu trace_printk buffer");
1973 return -ENOMEM;
1974}
1975
Steven Rostedt81698832012-10-11 10:15:05 -04001976static int buffers_allocated;
1977
Steven Rostedt07d777f2011-09-22 14:01:55 -04001978void trace_printk_init_buffers(void)
1979{
Steven Rostedt07d777f2011-09-22 14:01:55 -04001980 if (buffers_allocated)
1981 return;
1982
1983 if (alloc_percpu_trace_buffer())
1984 return;
1985
1986 pr_info("ftrace: Allocated trace_printk buffers\n");
1987
Steven Rostedtb382ede62012-10-10 21:44:34 -04001988 /* Expand the buffers to set size */
1989 tracing_update_buffers();
1990
Steven Rostedt07d777f2011-09-22 14:01:55 -04001991 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04001992
1993 /*
1994 * trace_printk_init_buffers() can be called by modules.
1995 * If that happens, then we need to start cmdline recording
1996 * directly here. If the global trace buffer is already
1997 * allocated, then this was called by module code.
1998 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001999 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04002000 tracing_start_cmdline_record();
2001}
2002
2003void trace_printk_start_comm(void)
2004{
2005 /* Start tracing comms if trace printk is set */
2006 if (!buffers_allocated)
2007 return;
2008 tracing_start_cmdline_record();
2009}
2010
2011static void trace_printk_start_stop_comm(int enabled)
2012{
2013 if (!buffers_allocated)
2014 return;
2015
2016 if (enabled)
2017 tracing_start_cmdline_record();
2018 else
2019 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002020}
2021
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002022/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002023 * trace_vbprintk - write binary msg to tracing buffer
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002024 * @ip:   The address of the caller
 * @fmt:  The format string to write into the buffer
 * @args: Arguments for @fmt
2025 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04002026int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002027{
Tom Zanussie1112b42009-03-31 00:48:49 -05002028 struct ftrace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002029 struct ring_buffer_event *event;
Steven Rostedte77405a2009-09-02 14:17:06 -04002030 struct ring_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002031 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002032 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002033 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002034 char *tbuffer;
2035 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002036
2037 if (unlikely(tracing_selftest_running || tracing_disabled))
2038 return 0;
2039
2040 /* Don't pollute graph traces with trace_vprintk internals */
2041 pause_graph_tracing();
2042
2043 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04002044 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002045
Steven Rostedt07d777f2011-09-22 14:01:55 -04002046 tbuffer = get_trace_buf();
2047 if (!tbuffer) {
2048 len = 0;
2049 goto out;
2050 }
2051
2052 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2053
2054 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002055 goto out;
2056
Steven Rostedt07d777f2011-09-22 14:01:55 -04002057 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002058 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002059 buffer = tr->trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04002060 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2061 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002062 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002063 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002064 entry = ring_buffer_event_data(event);
2065 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002066 entry->fmt = fmt;
2067
Steven Rostedt07d777f2011-09-22 14:01:55 -04002068 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05002069 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002070 __buffer_unlock_commit(buffer, event);
Steven Rostedtd9313692010-01-06 17:27:11 -05002071 ftrace_trace_stack(buffer, flags, 6, pc);
2072 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002073
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002074out:
Steven Rostedt5168ae52010-06-03 09:36:50 -04002075 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002076 unpause_graph_tracing();
2077
2078 return len;
2079}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002080EXPORT_SYMBOL_GPL(trace_vbprintk);
2081
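/*
 * Like trace_vbprintk() above, but the message is formatted with
 * vsnprintf() up front and recorded as a TRACE_PRINT entry holding
 * the finished string rather than the binary format arguments.
 */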
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002082static int
2083__trace_array_vprintk(struct ring_buffer *buffer,
2084 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002085{
Tom Zanussie1112b42009-03-31 00:48:49 -05002086 struct ftrace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002087 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002088 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002089 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002090 unsigned long flags;
2091 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002092
2093 if (tracing_disabled || tracing_selftest_running)
2094 return 0;
2095
Steven Rostedt07d777f2011-09-22 14:01:55 -04002096 /* Don't pollute graph traces with trace_vprintk internals */
2097 pause_graph_tracing();
2098
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002099 pc = preempt_count();
2100 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002101
2103 tbuffer = get_trace_buf();
2104 if (!tbuffer) {
2105 len = 0;
2106 goto out;
2107 }
2108
2109 len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2110 if (len > TRACE_BUF_SIZE)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002111 goto out;
2112
Steven Rostedt07d777f2011-09-22 14:01:55 -04002113 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002114 size = sizeof(*entry) + len + 1;
Steven Rostedte77405a2009-09-02 14:17:06 -04002115 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
Steven Rostedt07d777f2011-09-22 14:01:55 -04002116 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002117 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002118 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002119 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002120 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002121
Steven Rostedt07d777f2011-09-22 14:01:55 -04002122 memcpy(&entry->buf, tbuffer, len);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002123 entry->buf[len] = '\0';
Tom Zanussif306cc82013-10-24 08:34:17 -05002124 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002125 __buffer_unlock_commit(buffer, event);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002126 ftrace_trace_stack(buffer, flags, 6, pc);
Steven Rostedtd9313692010-01-06 17:27:11 -05002127 }
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002128 out:
2129 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002130 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002131
2132 return len;
2133}
Steven Rostedt659372d2009-09-03 19:11:07 -04002134
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002135int trace_array_vprintk(struct trace_array *tr,
2136 unsigned long ip, const char *fmt, va_list args)
2137{
2138 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2139}
2140
2141int trace_array_printk(struct trace_array *tr,
2142 unsigned long ip, const char *fmt, ...)
2143{
2144 int ret;
2145 va_list ap;
2146
2147 if (!(trace_flags & TRACE_ITER_PRINTK))
2148 return 0;
2149
2150 va_start(ap, fmt);
2151 ret = trace_array_vprintk(tr, ip, fmt, ap);
2152 va_end(ap);
2153 return ret;
2154}
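
/*
 * Minimal usage sketch (hypothetical caller; assumes @tr points at a
 * valid, initialized trace_array, and name/value are the caller's):
 *
 *	trace_array_printk(tr, _THIS_IP_, "reached %s with %d\n",
 *			   name, value);
 */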
2155
2156int trace_array_printk_buf(struct ring_buffer *buffer,
2157 unsigned long ip, const char *fmt, ...)
2158{
2159 int ret;
2160 va_list ap;
2161
2162 if (!(trace_flags & TRACE_ITER_PRINTK))
2163 return 0;
2164
2165 va_start(ap, fmt);
2166 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2167 va_end(ap);
2168 return ret;
2169}
2170
Steven Rostedt659372d2009-09-03 19:11:07 -04002171int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2172{
Steven Rostedta813a152009-10-09 01:41:35 -04002173 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04002174}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002175EXPORT_SYMBOL_GPL(trace_vprintk);
2176
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002177static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04002178{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002179 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2180
Steven Rostedt5a90f572008-09-03 17:42:51 -04002181 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002182 if (buf_iter)
2183 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04002184}
2185
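/*
 * Peek at the next entry for @cpu without consuming it: read through
 * the ring buffer iterator when one exists, and straight from the
 * live buffer otherwise (in which case @lost_events reports how many
 * events were overwritten in the meantime).
 */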
Ingo Molnare309b412008-05-12 21:20:51 +02002186static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002187peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2188 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002189{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002190 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002191 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002192
Steven Rostedtd7690412008-10-01 00:29:53 -04002193 if (buf_iter)
2194 event = ring_buffer_iter_peek(buf_iter, ts);
2195 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002196 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002197 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04002198
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002199 if (event) {
2200 iter->ent_size = ring_buffer_event_length(event);
2201 return ring_buffer_event_data(event);
2202 }
2203 iter->ent_size = 0;
2204 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002205}
Steven Rostedtd7690412008-10-01 00:29:53 -04002206
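/*
 * Find the oldest pending entry: peek at every tracing CPU (or only
 * at iter->cpu_file for a per-cpu trace file) and return the entry
 * with the smallest timestamp, along with its cpu, timestamp and
 * lost-event count.
 */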
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002207static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002208__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2209 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002210{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002211 struct ring_buffer *buffer = iter->trace_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002212 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08002213 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002214 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002215 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002216 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002217 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002218 int cpu;
2219
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002220 /*
2221 * If we are in a per_cpu trace file, don't bother iterating over
2222 * all the CPUs; peek at that CPU directly.
2223 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002224 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002225 if (ring_buffer_empty_cpu(buffer, cpu_file))
2226 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002227 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002228 if (ent_cpu)
2229 *ent_cpu = cpu_file;
2230
2231 return ent;
2232 }
2233
Steven Rostedtab464282008-05-12 21:21:00 +02002234 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002235
2236 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002237 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002238
Steven Rostedtbc21b472010-03-31 19:49:26 -04002239 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002240
Ingo Molnarcdd31cd2008-05-12 21:20:46 +02002241 /*
2242 * Pick the entry with the smallest timestamp:
2243 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002244 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002245 next = ent;
2246 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002247 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002248 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002249 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002250 }
2251 }
2252
Steven Rostedt12b5da32012-03-27 10:43:28 -04002253 iter->ent_size = next_size;
2254
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002255 if (ent_cpu)
2256 *ent_cpu = next_cpu;
2257
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002258 if (ent_ts)
2259 *ent_ts = next_ts;
2260
Steven Rostedtbc21b472010-03-31 19:49:26 -04002261 if (missing_events)
2262 *missing_events = next_lost;
2263
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002264 return next;
2265}
2266
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002267/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002268struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2269 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002270{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002271 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002272}
Ingo Molnar8c523a92008-05-12 21:20:46 +02002273
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002274/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05002275void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002276{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002277 iter->ent = __find_next_entry(iter, &iter->cpu,
2278 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02002279
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002280 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002281 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002282
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002283 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02002284}
2285
Ingo Molnare309b412008-05-12 21:20:51 +02002286static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002287{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002288 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002289 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002290}
2291
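/*
 * seq_file ->next() callback: advance the iterator to position *pos,
 * stepping one entry at a time since the trace cannot seek backwards.
 */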
Ingo Molnare309b412008-05-12 21:20:51 +02002292static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002293{
2294 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002295 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002296 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002297
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002298 WARN_ON_ONCE(iter->leftover);
2299
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002300 (*pos)++;
2301
2302 /* can't go backwards */
2303 if (iter->idx > i)
2304 return NULL;
2305
2306 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05002307 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002308 else
2309 ent = iter;
2310
2311 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05002312 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002313
2314 iter->pos = *pos;
2315
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002316 return ent;
2317}
2318
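/*
 * Rewind the per-cpu iterator and skip forward to the buffer's
 * time_start, remembering in skipped_entries how many events were
 * stepped over so the entry statistics stay consistent.
 */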
Jason Wessel955b61e2010-08-05 09:22:23 -05002319void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002320{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002321 struct ring_buffer_event *event;
2322 struct ring_buffer_iter *buf_iter;
2323 unsigned long entries = 0;
2324 u64 ts;
2325
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002326 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002327
Steven Rostedt6d158a82012-06-27 20:46:14 -04002328 buf_iter = trace_buffer_iter(iter, cpu);
2329 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002330 return;
2331
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002332 ring_buffer_iter_reset(buf_iter);
2333
2334 /*
2335 * With the max latency tracers a reset may never have taken
2336 * place on a cpu. That case shows up as timestamps that lie
2337 * before the start of the buffer.
2338 */
2339 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002340 if (ts >= iter->trace_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002341 break;
2342 entries++;
2343 ring_buffer_read(buf_iter, NULL);
2344 }
2345
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002346 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002347}
2348
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002349/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002350 * The current tracer is copied to avoid taking a global lock
2351 * all around.
2352 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002353static void *s_start(struct seq_file *m, loff_t *pos)
2354{
2355 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002356 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002357 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002358 void *p = NULL;
2359 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002360 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002361
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09002362 /*
2363 * copy the tracer to avoid using a global lock all around.
2364 * iter->trace is a copy of current_trace, the pointer to the
2365 * name may be used instead of a strcmp(), as iter->trace->name
2366 * will point to the same string as current_trace->name.
2367 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002368 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002369 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2370 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002371 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002372
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002373#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002374 if (iter->snapshot && iter->trace->use_max_tr)
2375 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002376#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002377
2378 if (!iter->snapshot)
2379 atomic_inc(&trace_record_cmdline_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002380
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002381 if (*pos != iter->pos) {
2382 iter->ent = NULL;
2383 iter->cpu = 0;
2384 iter->idx = -1;
2385
Steven Rostedtae3b5092013-01-23 15:22:59 -05002386 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002387 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002388 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002389 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002390 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002391
Lai Jiangshanac91d852010-03-02 17:54:50 +08002392 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002393 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2394 ;
2395
2396 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002397 /*
2398 * If we overflowed the seq_file before, then we want
2399 * to just reuse the trace_seq buffer again.
2400 */
2401 if (iter->leftover)
2402 p = iter;
2403 else {
2404 l = *pos - 1;
2405 p = s_next(m, p, &l);
2406 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002407 }
2408
Lai Jiangshan4f535962009-05-18 19:35:34 +08002409 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002410 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002411 return p;
2412}
2413
2414static void s_stop(struct seq_file *m, void *p)
2415{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002416 struct trace_iterator *iter = m->private;
2417
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002418#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002419 if (iter->snapshot && iter->trace->use_max_tr)
2420 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002421#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002422
2423 if (!iter->snapshot)
2424 atomic_dec(&trace_record_cmdline_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002425
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002426 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08002427 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002428}
2429
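/*
 * Sum the per-cpu entry counts: *entries is what currently sits in
 * the buffers, and *total additionally counts overwritten (overrun)
 * events, except on CPUs whose skipped_entries already account for
 * everything.
 */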
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002430static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002431get_total_entries(struct trace_buffer *buf,
2432 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002433{
2434 unsigned long count;
2435 int cpu;
2436
2437 *total = 0;
2438 *entries = 0;
2439
2440 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002441 count = ring_buffer_entries_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002442 /*
2443 * If this buffer has skipped entries, then we hold all
2444 * entries for the trace and we need to ignore the
2445 * ones before the time stamp.
2446 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002447 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2448 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002449 /* total is the same as the entries */
2450 *total += count;
2451 } else
2452 *total += count +
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002453 ring_buffer_overrun_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002454 *entries += count;
2455 }
2456}
2457
Ingo Molnare309b412008-05-12 21:20:51 +02002458static void print_lat_help_header(struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002459{
Michael Ellermana6168352008-08-20 16:36:11 -07002460 seq_puts(m, "# _------=> CPU# \n");
2461 seq_puts(m, "# / _-----=> irqs-off \n");
2462 seq_puts(m, "# | / _----=> need-resched \n");
2463 seq_puts(m, "# || / _---=> hardirq/softirq \n");
2464 seq_puts(m, "# ||| / _--=> preempt-depth \n");
Steven Rostedte6e1e252011-03-09 10:41:56 -05002465 seq_puts(m, "# |||| / delay \n");
2466 seq_puts(m, "# cmd pid ||||| time | caller \n");
2467 seq_puts(m, "# \\ / ||||| \\ | / \n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002468}
2469
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002470static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002471{
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002472 unsigned long total;
2473 unsigned long entries;
2474
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002475 get_total_entries(buf, &total, &entries);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002476 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2477 entries, total, num_online_cpus());
2478 seq_puts(m, "#\n");
2479}
2480
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002481static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002482{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002483 print_event_info(buf, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002484 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
Michael Ellermana6168352008-08-20 16:36:11 -07002485 seq_puts(m, "# | | | | |\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002486}
2487
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002488static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt77271ce2011-11-17 09:34:33 -05002489{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002490 print_event_info(buf, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002491 seq_puts(m, "# _-----=> irqs-off\n");
2492 seq_puts(m, "# / _----=> need-resched\n");
2493 seq_puts(m, "# | / _---=> hardirq/softirq\n");
2494 seq_puts(m, "# || / _--=> preempt-depth\n");
2495 seq_puts(m, "# ||| / delay\n");
2496 seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n");
2497 seq_puts(m, "# | | | |||| | |\n");
2498}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002499
Jiri Olsa62b915f2010-04-02 19:01:22 +02002500void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002501print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2502{
2503 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002504 struct trace_buffer *buf = iter->trace_buffer;
2505 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002506 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002507 unsigned long entries;
2508 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002509 const char *name = "preemption";
2510
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05002511 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002512
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002513 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002514
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002515 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002516 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002517 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002518 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002519 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002520 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02002521 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002522 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02002523 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002524 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002525#if defined(CONFIG_PREEMPT_NONE)
2526 "server",
2527#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2528 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04002529#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002530 "preempt",
2531#else
2532 "unknown",
2533#endif
2534 /* These are reserved for later use */
2535 0, 0, 0, 0);
2536#ifdef CONFIG_SMP
2537 seq_printf(m, " #P:%d)\n", num_online_cpus());
2538#else
2539 seq_puts(m, ")\n");
2540#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002541 seq_puts(m, "# -----------------\n");
2542 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002543 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07002544 data->comm, data->pid,
2545 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002546 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002547 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002548
2549 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002550 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002551 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2552 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002553 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002554 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2555 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04002556 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002557 }
2558
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002559 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002560}
2561
Steven Rostedta3097202008-11-07 22:36:02 -05002562static void test_cpu_buff_start(struct trace_iterator *iter)
2563{
2564 struct trace_seq *s = &iter->seq;
2565
Steven Rostedt12ef7d42008-11-12 17:52:38 -05002566 if (!(trace_flags & TRACE_ITER_ANNOTATE))
2567 return;
2568
2569 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2570 return;
2571
Rusty Russell44623442009-01-01 10:12:23 +10302572 if (cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05002573 return;
2574
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002575 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002576 return;
2577
Rusty Russell44623442009-01-01 10:12:23 +10302578 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02002579
2580 /* Don't print started cpu buffer for the first entry of the trace */
2581 if (iter->idx > 1)
2582 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2583 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05002584}
2585
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002586static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002587{
Steven Rostedt214023c2008-05-12 21:20:46 +02002588 struct trace_seq *s = &iter->seq;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002589 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002590 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002591 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002592
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002593 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002594
Steven Rostedta3097202008-11-07 22:36:02 -05002595 test_cpu_buff_start(iter);
2596
Steven Rostedtf633cef2008-12-23 23:24:13 -05002597 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002598
2599 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt27d48be2009-03-04 21:57:29 -05002600 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2601 if (!trace_print_lat_context(iter))
2602 goto partial;
2603 } else {
2604 if (!trace_print_context(iter))
2605 goto partial;
2606 }
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002607 }
2608
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002609 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04002610 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002611
2612 if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
2613 goto partial;
Steven Rostedt7104f302008-10-01 10:52:51 -04002614
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002615 return TRACE_TYPE_HANDLED;
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002616partial:
2617 return TRACE_TYPE_PARTIAL_LINE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002618}
2619
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002620static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002621{
2622 struct trace_seq *s = &iter->seq;
2623 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002624 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002625
2626 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002627
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002628 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002629 if (!trace_seq_printf(s, "%d %d %llu ",
2630 entry->pid, iter->cpu, iter->ts))
2631 goto partial;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002632 }
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002633
Steven Rostedtf633cef2008-12-23 23:24:13 -05002634 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002635 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04002636 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002637
2638 if (!trace_seq_printf(s, "%d ?\n", entry->type))
2639 goto partial;
Steven Rostedt7104f302008-10-01 10:52:51 -04002640
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002641 return TRACE_TYPE_HANDLED;
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002642partial:
2643 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002644}
2645
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002646static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002647{
2648 struct trace_seq *s = &iter->seq;
2649 unsigned char newline = '\n';
2650 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002651 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002652
2653 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002654
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002655 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2656 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
2657 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
2658 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
2659 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002660
Steven Rostedtf633cef2008-12-23 23:24:13 -05002661 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002662 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04002663 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002664 if (ret != TRACE_TYPE_HANDLED)
2665 return ret;
2666 }
Steven Rostedt7104f302008-10-01 10:52:51 -04002667
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002668 SEQ_PUT_FIELD_RET(s, newline);
2669
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002670 return TRACE_TYPE_HANDLED;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002671}
2672
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002673static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002674{
2675 struct trace_seq *s = &iter->seq;
2676 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002677 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002678
2679 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002680
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002681 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2682 SEQ_PUT_FIELD_RET(s, entry->pid);
Steven Rostedt1830b52d2009-02-07 19:38:43 -05002683 SEQ_PUT_FIELD_RET(s, iter->cpu);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002684 SEQ_PUT_FIELD_RET(s, iter->ts);
2685 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002686
Steven Rostedtf633cef2008-12-23 23:24:13 -05002687 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04002688 return event ? event->funcs->binary(iter, 0, event) :
2689 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002690}
2691
Jiri Olsa62b915f2010-04-02 19:01:22 +02002692int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002693{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002694 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002695 int cpu;
2696
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002697 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002698 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002699 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002700 buf_iter = trace_buffer_iter(iter, cpu);
2701 if (buf_iter) {
2702 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002703 return 0;
2704 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002705 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002706 return 0;
2707 }
2708 return 1;
2709 }
2710
Steven Rostedtab464282008-05-12 21:21:00 +02002711 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04002712 buf_iter = trace_buffer_iter(iter, cpu);
2713 if (buf_iter) {
2714 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04002715 return 0;
2716 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002717 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04002718 return 0;
2719 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002720 }
Steven Rostedtd7690412008-10-01 00:29:53 -04002721
Frederic Weisbecker797d3712008-09-30 18:13:45 +02002722 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002723}
2724
Lai Jiangshan4f535962009-05-18 19:35:34 +08002725/* Called with trace_event_read_lock() held. */
Jason Wessel955b61e2010-08-05 09:22:23 -05002726enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002727{
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002728 enum print_line_t ret;
2729
Jiri Olsaee5e51f2011-03-25 12:05:18 +01002730 if (iter->lost_events &&
2731 !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2732 iter->cpu, iter->lost_events))
2733 return TRACE_TYPE_PARTIAL_LINE;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002734
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002735 if (iter->trace && iter->trace->print_line) {
2736 ret = iter->trace->print_line(iter);
2737 if (ret != TRACE_TYPE_UNHANDLED)
2738 return ret;
2739 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02002740
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05002741 if (iter->ent->type == TRACE_BPUTS &&
2742 trace_flags & TRACE_ITER_PRINTK &&
2743 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2744 return trace_print_bputs_msg_only(iter);
2745
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002746 if (iter->ent->type == TRACE_BPRINT &&
2747 trace_flags & TRACE_ITER_PRINTK &&
2748 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04002749 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002750
Frederic Weisbecker66896a82008-12-13 20:18:13 +01002751 if (iter->ent->type == TRACE_PRINT &&
2752 trace_flags & TRACE_ITER_PRINTK &&
2753 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04002754 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01002755
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002756 if (trace_flags & TRACE_ITER_BIN)
2757 return print_bin_fmt(iter);
2758
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002759 if (trace_flags & TRACE_ITER_HEX)
2760 return print_hex_fmt(iter);
2761
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002762 if (trace_flags & TRACE_ITER_RAW)
2763 return print_raw_fmt(iter);
2764
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002765 return print_trace_fmt(iter);
2766}
2767
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01002768void trace_latency_header(struct seq_file *m)
2769{
2770 struct trace_iterator *iter = m->private;
2771
2772 /* print nothing if the buffers are empty */
2773 if (trace_empty(iter))
2774 return;
2775
2776 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2777 print_trace_header(m, iter);
2778
2779 if (!(trace_flags & TRACE_ITER_VERBOSE))
2780 print_lat_help_header(m);
2781}
2782
Jiri Olsa62b915f2010-04-02 19:01:22 +02002783void trace_default_header(struct seq_file *m)
2784{
2785 struct trace_iterator *iter = m->private;
2786
Jiri Olsaf56e7f82011-06-03 16:58:49 +02002787 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2788 return;
2789
Jiri Olsa62b915f2010-04-02 19:01:22 +02002790 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2791 /* print nothing if the buffers are empty */
2792 if (trace_empty(iter))
2793 return;
2794 print_trace_header(m, iter);
2795 if (!(trace_flags & TRACE_ITER_VERBOSE))
2796 print_lat_help_header(m);
2797 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05002798 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2799 if (trace_flags & TRACE_ITER_IRQ_INFO)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002800 print_func_help_header_irq(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002801 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002802 print_func_help_header(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002803 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02002804 }
2805}
2806
Steven Rostedte0a413f2011-09-29 21:26:16 -04002807static void test_ftrace_alive(struct seq_file *m)
2808{
2809 if (!ftrace_is_dead())
2810 return;
2811 seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
2812 seq_printf(m, "# MAY BE MISSING FUNCTION EVENTS\n");
2813}
2814
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002815#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002816static void show_snapshot_main_help(struct seq_file *m)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002817{
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002818 seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
2819 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2820 seq_printf(m, "# Takes a snapshot of the main buffer.\n");
Wang YanQingb9be6d02013-09-14 12:59:16 +08002821 seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002822 seq_printf(m, "# (Doesn't have to be '2'; works with any number that\n");
2823 seq_printf(m, "# is not a '0' or '1')\n");
2824}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002825
2826static void show_snapshot_percpu_help(struct seq_file *m)
2827{
2828 seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2829#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2830 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2831 seq_printf(m, "# Takes a snapshot of the main buffer for this cpu.\n");
2832#else
2833 seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
2834 seq_printf(m, "# Must use main snapshot file to allocate.\n");
2835#endif
2836 seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
2837 seq_printf(m, "# (Doesn't have to be '2'; works with any number that\n");
2838 seq_printf(m, "# is not a '0' or '1')\n");
2839}
2840
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002841static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2842{
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05002843 if (iter->tr->allocated_snapshot)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002844 seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
2845 else
2846 seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
2847
2848 seq_printf(m, "# Snapshot commands:\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002849 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2850 show_snapshot_main_help(m);
2851 else
2852 show_snapshot_percpu_help(m);
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002853}
2854#else
2855/* Should never be called */
2856static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2857#endif
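/*
 * Illustrative shell session matching the help text above (assumes
 * debugfs mounted at /sys/kernel/debug):
 *
 *	cd /sys/kernel/debug/tracing
 *	echo 1 > snapshot	# allocate the buffer and take a snapshot
 *	cat snapshot		# read the frozen copy of the trace
 *	echo 2 > snapshot	# clear the snapshot without freeing it
 *	echo 0 > snapshot	# free the snapshot buffer again
 */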
2858
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002859static int s_show(struct seq_file *m, void *v)
2860{
2861 struct trace_iterator *iter = v;
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002862 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002863
2864 if (iter->ent == NULL) {
2865 if (iter->tr) {
2866 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2867 seq_puts(m, "#\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04002868 test_ftrace_alive(m);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002869 }
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002870 if (iter->snapshot && trace_empty(iter))
2871 print_snapshot_help(m, iter);
2872 else if (iter->trace && iter->trace->print_header)
Markus Metzger8bba1bf2008-11-25 09:12:31 +01002873 iter->trace->print_header(m);
Jiri Olsa62b915f2010-04-02 19:01:22 +02002874 else
2875 trace_default_header(m);
2876
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002877 } else if (iter->leftover) {
2878 /*
2879 * If we filled the seq_file buffer earlier, we
2880 * want to just show it now.
2881 */
2882 ret = trace_print_seq(m, &iter->seq);
2883
2884 /* ret should this time be zero, but you never know */
2885 iter->leftover = ret;
2886
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002887 } else {
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002888 print_trace_line(iter);
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002889 ret = trace_print_seq(m, &iter->seq);
2890 /*
2891 * If we overflow the seq_file buffer, then it will
2892 * ask us for this data again at start up.
2893 * Use that instead.
2894 * ret is 0 if seq_file write succeeded.
2895 * -1 otherwise.
2896 */
2897 iter->leftover = ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002898 }
2899
2900 return 0;
2901}
2902
Oleg Nesterov649e9c72013-07-23 17:25:54 +02002903/*
2904 * Should be used after trace_array_get(); trace_types_lock
2905 * ensures that i_cdev was already initialized.
2906 */
2907static inline int tracing_get_cpu(struct inode *inode)
2908{
2909 if (inode->i_cdev) /* See trace_create_cpu_file() */
2910 return (long)inode->i_cdev - 1;
2911 return RING_BUFFER_ALL_CPUS;
2912}
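/*
 * Sketch of the encode side assumed by tracing_get_cpu(): per-cpu file
 * creation (see trace_create_cpu_file()) is expected to bias the cpu
 * number by one when stashing it in i_cdev, so that a never-initialized
 * (zero) i_cdev still reads back as RING_BUFFER_ALL_CPUS:
 *
 *	inode->i_cdev = (void *)(long)(cpu + 1);
 */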
2913
James Morris88e9d342009-09-22 16:43:43 -07002914static const struct seq_operations tracer_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02002915 .start = s_start,
2916 .next = s_next,
2917 .stop = s_stop,
2918 .show = s_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002919};
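/*
 * Rough sketch of how the seq_file core drives tracer_seq_ops on each
 * read() of the "trace" file (simplified; buffering and error handling
 * omitted):
 *
 *	p = ops->start(m, &pos);
 *	while (p && !IS_ERR(p)) {
 *		ops->show(m, p);
 *		p = ops->next(m, p, &pos);
 *	}
 *	ops->stop(m, p);
 */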
2920
Ingo Molnare309b412008-05-12 21:20:51 +02002921static struct trace_iterator *
Oleg Nesterov6484c712013-07-23 17:26:10 +02002922__tracing_open(struct inode *inode, struct file *file, bool snapshot)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002923{
Oleg Nesterov6484c712013-07-23 17:26:10 +02002924 struct trace_array *tr = inode->i_private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002925 struct trace_iterator *iter;
Jiri Olsa50e18b92012-04-25 10:23:39 +02002926 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002927
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002928 if (tracing_disabled)
2929 return ERR_PTR(-ENODEV);
Steven Rostedt60a11772008-05-12 21:20:44 +02002930
Jiri Olsa50e18b92012-04-25 10:23:39 +02002931 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002932 if (!iter)
2933 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002934
Steven Rostedt6d158a82012-06-27 20:46:14 -04002935 iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
2936 GFP_KERNEL);
Dan Carpenter93574fc2012-07-11 09:35:08 +03002937 if (!iter->buffer_iter)
2938 goto release;
2939
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002940 /*
2941 * We make a copy of the current tracer to avoid concurrent
2942 * changes on it while we are reading.
2943 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002944 mutex_lock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002945 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002946 if (!iter->trace)
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002947 goto fail;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002948
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002949 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002950
Li Zefan79f55992009-06-15 14:58:26 +08002951 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02002952 goto fail;
2953
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002954 iter->tr = tr;
2955
2956#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002957 /* Currently only the top directory has a snapshot */
2958 if (tr->current_trace->print_max || snapshot)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002959 iter->trace_buffer = &tr->max_buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002960 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002961#endif
2962 iter->trace_buffer = &tr->trace_buffer;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002963 iter->snapshot = snapshot;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002964 iter->pos = -1;
Oleg Nesterov6484c712013-07-23 17:26:10 +02002965 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002966 mutex_init(&iter->mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002967
Markus Metzger8bba1bf2008-11-25 09:12:31 +01002968 /* Notify the tracer early; before we stop tracing. */
2969 if (iter->trace && iter->trace->open)
Markus Metzgera93751c2008-12-11 13:53:26 +01002970 iter->trace->open(iter);
Markus Metzger8bba1bf2008-11-25 09:12:31 +01002971
Steven Rostedt12ef7d42008-11-12 17:52:38 -05002972 /* Annotate start of buffers if we had overruns */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002973 if (ring_buffer_overruns(iter->trace_buffer->buffer))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05002974 iter->iter_flags |= TRACE_FILE_ANNOTATE;
2975
David Sharp8be07092012-11-13 12:18:22 -08002976 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09002977 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08002978 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
2979
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002980 /* stop the trace while dumping if we are not opening "snapshot" */
2981 if (!iter->snapshot)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002982 tracing_stop_tr(tr);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002983
Steven Rostedtae3b5092013-01-23 15:22:59 -05002984 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002985 for_each_tracing_cpu(cpu) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002986 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002987 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07002988 }
2989 ring_buffer_read_prepare_sync();
2990 for_each_tracing_cpu(cpu) {
2991 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002992 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002993 }
2994 } else {
2995 cpu = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002996 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002997 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07002998 ring_buffer_read_prepare_sync();
2999 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003000 tracing_iter_reset(iter, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003001 }
3002
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003003 mutex_unlock(&trace_types_lock);
3004
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003005 return iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003006
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003007 fail:
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003008 mutex_unlock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003009 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003010 kfree(iter->buffer_iter);
Dan Carpenter93574fc2012-07-11 09:35:08 +03003011release:
Jiri Olsa50e18b92012-04-25 10:23:39 +02003012 seq_release_private(inode, file);
3013 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003014}
3015
3016int tracing_open_generic(struct inode *inode, struct file *filp)
3017{
Steven Rostedt60a11772008-05-12 21:20:44 +02003018 if (tracing_disabled)
3019 return -ENODEV;
3020
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003021 filp->private_data = inode->i_private;
3022 return 0;
3023}
3024
Geyslan G. Bem2e864212013-10-18 21:15:54 -03003025bool tracing_is_disabled(void)
3026{
3027 return (tracing_disabled) ? true : false;
3028}
3029
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003030/*
3031 * Open and update trace_array ref count.
3032 * Must have the current trace_array passed to it.
3033 */
Steven Rostedt (Red Hat)dcc30222013-07-02 20:30:52 -04003034static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003035{
3036 struct trace_array *tr = inode->i_private;
3037
3038 if (tracing_disabled)
3039 return -ENODEV;
3040
3041 if (trace_array_get(tr) < 0)
3042 return -ENODEV;
3043
3044 filp->private_data = inode->i_private;
3045
3046 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003047}
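/*
 * An open done through tracing_open_generic_tr() is expected to be paired
 * with tracing_release_generic_tr() below so that the reference taken by
 * trace_array_get() here is dropped again; tracing_cpumask_fops is one
 * example of such a pairing.
 */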
3048
Hannes Eder4fd27352009-02-10 19:44:12 +01003049static int tracing_release(struct inode *inode, struct file *file)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003050{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003051 struct trace_array *tr = inode->i_private;
matt mooney907f2782010-09-27 19:04:53 -07003052 struct seq_file *m = file->private_data;
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003053 struct trace_iterator *iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003054 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003055
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003056 if (!(file->f_mode & FMODE_READ)) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003057 trace_array_put(tr);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003058 return 0;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003059 }
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003060
Oleg Nesterov6484c712013-07-23 17:26:10 +02003061 /* Writes do not use seq_file */
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003062 iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003063 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05003064
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003065 for_each_tracing_cpu(cpu) {
3066 if (iter->buffer_iter[cpu])
3067 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3068 }
3069
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003070 if (iter->trace && iter->trace->close)
3071 iter->trace->close(iter);
3072
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003073 if (!iter->snapshot)
3074 /* reenable tracing if it was previously enabled */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003075 tracing_start_tr(tr);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003076
3077 __trace_array_put(tr);
3078
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003079 mutex_unlock(&trace_types_lock);
3080
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003081 mutex_destroy(&iter->mutex);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003082 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003083 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003084 kfree(iter->buffer_iter);
Jiri Olsa50e18b92012-04-25 10:23:39 +02003085 seq_release_private(inode, file);
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003086
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003087 return 0;
3088}
3089
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003090static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3091{
3092 struct trace_array *tr = inode->i_private;
3093
3094 trace_array_put(tr);
3095 return 0;
3096}
3097
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003098static int tracing_single_release_tr(struct inode *inode, struct file *file)
3099{
3100 struct trace_array *tr = inode->i_private;
3101
3102 trace_array_put(tr);
3103
3104 return single_release(inode, file);
3105}
3106
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003107static int tracing_open(struct inode *inode, struct file *file)
3108{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003109 struct trace_array *tr = inode->i_private;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003110 struct trace_iterator *iter;
3111 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003112
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003113 if (trace_array_get(tr) < 0)
3114 return -ENODEV;
3115
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003116 /* If this file was open for write, then erase contents */
Oleg Nesterov6484c712013-07-23 17:26:10 +02003117 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3118 int cpu = tracing_get_cpu(inode);
3119
3120 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003121 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003122 else
Oleg Nesterov6484c712013-07-23 17:26:10 +02003123 tracing_reset(&tr->trace_buffer, cpu);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003124 }
3125
3126 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003127 iter = __tracing_open(inode, file, false);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003128 if (IS_ERR(iter))
3129 ret = PTR_ERR(iter);
3130 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
3131 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3132 }
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003133
3134 if (ret < 0)
3135 trace_array_put(tr);
3136
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003137 return ret;
3138}
3139
Ingo Molnare309b412008-05-12 21:20:51 +02003140static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003141t_next(struct seq_file *m, void *v, loff_t *pos)
3142{
Li Zefanf129e962009-06-24 09:53:44 +08003143 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003144
3145 (*pos)++;
3146
3147 if (t)
3148 t = t->next;
3149
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003150 return t;
3151}
3152
3153static void *t_start(struct seq_file *m, loff_t *pos)
3154{
Li Zefanf129e962009-06-24 09:53:44 +08003155 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003156 loff_t l = 0;
3157
3158 mutex_lock(&trace_types_lock);
Li Zefanf129e962009-06-24 09:53:44 +08003159 for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003160 ;
3161
3162 return t;
3163}
3164
3165static void t_stop(struct seq_file *m, void *p)
3166{
3167 mutex_unlock(&trace_types_lock);
3168}
3169
3170static int t_show(struct seq_file *m, void *v)
3171{
3172 struct tracer *t = v;
3173
3174 if (!t)
3175 return 0;
3176
3177 seq_printf(m, "%s", t->name);
3178 if (t->next)
3179 seq_putc(m, ' ');
3180 else
3181 seq_putc(m, '\n');
3182
3183 return 0;
3184}
3185
James Morris88e9d342009-09-22 16:43:43 -07003186static const struct seq_operations show_traces_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003187 .start = t_start,
3188 .next = t_next,
3189 .stop = t_stop,
3190 .show = t_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003191};
3192
3193static int show_traces_open(struct inode *inode, struct file *file)
3194{
Steven Rostedt60a11772008-05-12 21:20:44 +02003195 if (tracing_disabled)
3196 return -ENODEV;
3197
Li Zefanf129e962009-06-24 09:53:44 +08003198 return seq_open(file, &show_traces_seq_ops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003199}
3200
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003201static ssize_t
3202tracing_write_stub(struct file *filp, const char __user *ubuf,
3203 size_t count, loff_t *ppos)
3204{
3205 return count;
3206}
3207
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003208loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08003209{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003210 int ret;
3211
Slava Pestov364829b2010-11-24 15:13:16 -08003212 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003213 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08003214 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003215 file->f_pos = ret = 0;
3216
3217 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08003218}
3219
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003220static const struct file_operations tracing_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003221 .open = tracing_open,
3222 .read = seq_read,
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003223 .write = tracing_write_stub,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003224 .llseek = tracing_lseek,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003225 .release = tracing_release,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003226};
3227
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003228static const struct file_operations show_traces_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003229 .open = show_traces_open,
3230 .read = seq_read,
3231 .release = seq_release,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003232 .llseek = seq_lseek,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003233};
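/*
 * Reading "available_tracers" walks t_start()/t_next() above over the
 * registered tracer list and emits one space-separated line, e.g.
 * (contents depend on the kernel configuration):
 *
 *	# cat available_tracers
 *	blk function_graph wakeup function nop
 */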
3234
Ingo Molnar36dfe922008-05-12 21:20:52 +02003235/*
Ingo Molnar36dfe922008-05-12 21:20:52 +02003236 * The tracer itself will not take this lock, but still we want
3237 * to provide a consistent cpumask to user-space:
3238 */
3239static DEFINE_MUTEX(tracing_cpumask_update_lock);
3240
3241/*
3242 * Temporary storage for the character representation of the
3243 * CPU bitmask (and one more byte for the newline):
3244 */
3245static char mask_str[NR_CPUS + 1];
3246
Ingo Molnarc7078de2008-05-12 21:20:52 +02003247static ssize_t
3248tracing_cpumask_read(struct file *filp, char __user *ubuf,
3249 size_t count, loff_t *ppos)
3250{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003251 struct trace_array *tr = file_inode(filp)->i_private;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003252 int len;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003253
3254 mutex_lock(&tracing_cpumask_update_lock);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003255
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003256 len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003257 if (count - len < 2) {
3258 count = -EINVAL;
3259 goto out_err;
3260 }
3261 len += sprintf(mask_str + len, "\n");
3262 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
3263
3264out_err:
Ingo Molnarc7078de2008-05-12 21:20:52 +02003265 mutex_unlock(&tracing_cpumask_update_lock);
3266
3267 return count;
3268}
3269
3270static ssize_t
3271tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3272 size_t count, loff_t *ppos)
3273{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003274 struct trace_array *tr = file_inode(filp)->i_private;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303275 cpumask_var_t tracing_cpumask_new;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003276 int err, cpu;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303277
3278 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3279 return -ENOMEM;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003280
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303281 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003282 if (err)
3283 goto err_unlock;
3284
Li Zefan215368e2009-06-15 10:56:42 +08003285 mutex_lock(&tracing_cpumask_update_lock);
3286
Steven Rostedta5e25882008-12-02 15:34:05 -05003287 local_irq_disable();
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01003288 arch_spin_lock(&ftrace_max_lock);
Steven Rostedtab464282008-05-12 21:21:00 +02003289 for_each_tracing_cpu(cpu) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02003290 /*
3291 * Increase/decrease the disabled counter if we are
3292 * about to flip a bit in the cpumask:
3293 */
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003294 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303295 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003296 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3297 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003298 }
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003299 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303300 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003301 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3302 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003303 }
3304 }
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01003305 arch_spin_unlock(&ftrace_max_lock);
Steven Rostedta5e25882008-12-02 15:34:05 -05003306 local_irq_enable();
Ingo Molnar36dfe922008-05-12 21:20:52 +02003307
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003308 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003309
Ingo Molnarc7078de2008-05-12 21:20:52 +02003310 mutex_unlock(&tracing_cpumask_update_lock);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303311 free_cpumask_var(tracing_cpumask_new);
Ingo Molnarc7078de2008-05-12 21:20:52 +02003312
Ingo Molnarc7078de2008-05-12 21:20:52 +02003313 return count;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003314
3315err_unlock:
Li Zefan215368e2009-06-15 10:56:42 +08003316 free_cpumask_var(tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003317
3318 return err;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003319}
3320
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003321static const struct file_operations tracing_cpumask_fops = {
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003322 .open = tracing_open_generic_tr,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003323 .read = tracing_cpumask_read,
3324 .write = tracing_cpumask_write,
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003325 .release = tracing_release_generic_tr,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003326 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003327};
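/*
 * Example user-space interaction with "tracing_cpumask" (illustrative;
 * assumes debugfs at /sys/kernel/debug):
 *
 *	# restrict tracing to CPUs 0 and 1
 *	echo 3 > /sys/kernel/debug/tracing/tracing_cpumask
 *	cat /sys/kernel/debug/tracing/tracing_cpumask
 *
 * The write path above disables the ring buffer on CPUs leaving the mask
 * and re-enables it on CPUs entering the mask.
 */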
3328
Li Zefanfdb372e2009-12-08 11:15:59 +08003329static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003330{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003331 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003332 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003333 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003334 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003335
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003336 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003337 tracer_flags = tr->current_trace->flags->val;
3338 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003339
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003340 for (i = 0; trace_options[i]; i++) {
3341 if (trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08003342 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003343 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003344 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003345 }
3346
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003347 for (i = 0; trace_opts[i].name; i++) {
3348 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08003349 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003350 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003351 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003352 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003353 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003354
Li Zefanfdb372e2009-12-08 11:15:59 +08003355 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003356}
3357
Li Zefan8d18eaa2009-12-08 11:17:06 +08003358static int __set_tracer_option(struct tracer *trace,
3359 struct tracer_flags *tracer_flags,
3360 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003361{
Li Zefan8d18eaa2009-12-08 11:17:06 +08003362 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003363
Li Zefan8d18eaa2009-12-08 11:17:06 +08003364 ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003365 if (ret)
3366 return ret;
3367
3368 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08003369 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003370 else
Zhaolei77708412009-08-07 18:53:21 +08003371 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003372 return 0;
3373}
3374
Li Zefan8d18eaa2009-12-08 11:17:06 +08003375/* Try to assign a tracer specific option */
3376static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
3377{
3378 struct tracer_flags *tracer_flags = trace->flags;
3379 struct tracer_opt *opts = NULL;
3380 int i;
3381
3382 for (i = 0; tracer_flags->opts[i].name; i++) {
3383 opts = &tracer_flags->opts[i];
3384
3385 if (strcmp(cmp, opts->name) == 0)
3386 return __set_tracer_option(trace, trace->flags,
3387 opts, neg);
3388 }
3389
3390 return -EINVAL;
3391}
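/*
 * Illustrative example: with the function_graph tracer active, writing
 * "nofuncgraph-proc" to trace_options matches no generic flag in
 * trace_options[], so trace_set_options() falls through to
 * set_tracer_option() above, which finds "funcgraph-proc" in the
 * tracer's flags->opts table and clears its bit (option name shown is
 * an example; the real set comes from the current tracer).
 */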
3392
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003393/* Some tracers require overwrite to stay enabled */
3394int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3395{
3396 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3397 return -1;
3398
3399 return 0;
3400}
3401
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003402int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003403{
3404 /* do nothing if flag is already set */
3405 if (!!(trace_flags & mask) == !!enabled)
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003406 return 0;
3407
3408 /* Give the tracer a chance to approve the change */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003409 if (tr->current_trace->flag_changed)
3410 if (tr->current_trace->flag_changed(tr->current_trace, mask, !!enabled))
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003411 return -EINVAL;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003412
3413 if (enabled)
3414 trace_flags |= mask;
3415 else
3416 trace_flags &= ~mask;
Li Zefane870e9a2010-07-02 11:07:32 +08003417
3418 if (mask == TRACE_ITER_RECORD_CMD)
3419 trace_event_enable_cmd_record(enabled);
David Sharp750912f2010-12-08 13:46:47 -08003420
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003421 if (mask == TRACE_ITER_OVERWRITE) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003422 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003423#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003424 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003425#endif
3426 }
Steven Rostedt81698832012-10-11 10:15:05 -04003427
3428 if (mask == TRACE_ITER_PRINTK)
3429 trace_printk_start_stop_comm(enabled);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003430
3431 return 0;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003432}
3433
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003434static int trace_set_options(struct trace_array *tr, char *option)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003435{
Li Zefan8d18eaa2009-12-08 11:17:06 +08003436 char *cmp;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003437 int neg = 0;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003438 int ret = -ENODEV;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003439 int i;
3440
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003441 cmp = strstrip(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003442
Li Zefan8d18eaa2009-12-08 11:17:06 +08003443 if (strncmp(cmp, "no", 2) == 0) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003444 neg = 1;
3445 cmp += 2;
3446 }
3447
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003448 mutex_lock(&trace_types_lock);
3449
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003450 for (i = 0; trace_options[i]; i++) {
Li Zefan8d18eaa2009-12-08 11:17:06 +08003451 if (strcmp(cmp, trace_options[i]) == 0) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003452 ret = set_tracer_flag(tr, 1 << i, !neg);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003453 break;
3454 }
3455 }
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003456
3457 /* If no option could be set, test the specific tracer options */
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003458 if (!trace_options[i])
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003459 ret = set_tracer_option(tr->current_trace, cmp, neg);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003460
3461 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003462
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003463 return ret;
3464}
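/*
 * Example of the parsing above (illustrative):
 *
 *	echo print-parent   > trace_options	-> set_tracer_flag(tr, 1 << i, 1)
 *	echo noprint-parent > trace_options	-> set_tracer_flag(tr, 1 << i, 0)
 *
 * The "no" prefix is stripped first; the remainder is compared against
 * the generic trace_options[] names and, failing that, handed to the
 * current tracer's own option table.
 */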
3465
3466static ssize_t
3467tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3468 size_t cnt, loff_t *ppos)
3469{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003470 struct seq_file *m = filp->private_data;
3471 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003472 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003473 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003474
3475 if (cnt >= sizeof(buf))
3476 return -EINVAL;
3477
3478 if (copy_from_user(&buf, ubuf, cnt))
3479 return -EFAULT;
3480
Steven Rostedta8dd2172013-01-09 20:54:17 -05003481 buf[cnt] = 0;
3482
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003483 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003484 if (ret < 0)
3485 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003486
Jiri Olsacf8517c2009-10-23 19:36:16 -04003487 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003488
3489 return cnt;
3490}
3491
Li Zefanfdb372e2009-12-08 11:15:59 +08003492static int tracing_trace_options_open(struct inode *inode, struct file *file)
3493{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003494 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003495 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003496
Li Zefanfdb372e2009-12-08 11:15:59 +08003497 if (tracing_disabled)
3498 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003499
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003500 if (trace_array_get(tr) < 0)
3501 return -ENODEV;
3502
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003503 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3504 if (ret < 0)
3505 trace_array_put(tr);
3506
3507 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08003508}
3509
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003510static const struct file_operations tracing_iter_fops = {
Li Zefanfdb372e2009-12-08 11:15:59 +08003511 .open = tracing_trace_options_open,
3512 .read = seq_read,
3513 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003514 .release = tracing_single_release_tr,
Steven Rostedtee6bce52008-11-12 17:52:37 -05003515 .write = tracing_trace_options_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003516};
3517
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003518static const char readme_msg[] =
3519 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003520 "# echo 0 > tracing_on : quick way to disable tracing\n"
3521 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3522 " Important files:\n"
3523 " trace\t\t\t- The static contents of the buffer\n"
3524 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3525 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3526 " current_tracer\t- function and latency tracers\n"
3527 " available_tracers\t- list of configured tracers for current_tracer\n"
3528 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3529 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3530 " trace_clock\t\t- change the clock used to order events\n"
3531 " local: Per cpu clock but may not be synced across CPUs\n"
3532 " global: Synced across CPUs but slows tracing down.\n"
3533 " counter: Not a clock, but just an increment\n"
3534 " uptime: Jiffy counter from time of boot\n"
3535 " perf: Same clock that perf events use\n"
3536#ifdef CONFIG_X86_64
3537 " x86-tsc: TSC cycle counter\n"
3538#endif
3539 "\n trace_marker\t\t- Writes into this file are written into the kernel buffer\n"
3540 " tracing_cpumask\t- Limit which CPUs to trace\n"
3541 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3542 "\t\t\t Remove sub-buffer with rmdir\n"
3543 " trace_options\t\t- Set format or modify how tracing happens\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003544 "\t\t\t Disable an option by prefixing the option\n"
3545 "\t\t\t name with 'no'\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003546#ifdef CONFIG_DYNAMIC_FTRACE
3547 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003548 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3549 "\t\t\t functions\n"
3550 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3551 "\t modules: Can select a group via module\n"
3552 "\t Format: :mod:<module-name>\n"
3553 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3554 "\t triggers: a command to perform when function is hit\n"
3555 "\t Format: <function>:<trigger>[:count]\n"
3556 "\t trigger: traceon, traceoff\n"
3557 "\t\t enable_event:<system>:<event>\n"
3558 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003559#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003560 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003561#endif
3562#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003563 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003564#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003565 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3566 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3567 "\t The first one will disable tracing every time do_fault is hit\n"
3568 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3569 "\t The first time do_trap is hit and it disables tracing, the\n"
3570 "\t counter will decrement to 2. If tracing is already disabled,\n"
3571 "\t the counter will not decrement. It only decrements when the\n"
3572 "\t trigger did work\n"
3573 "\t To remove a trigger without a count:\n"
3574 "\t echo '!<function>:<trigger>' > set_ftrace_filter\n"
3575 "\t To remove a trigger with a count:\n"
3576 "\t echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003577 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003578 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3579 "\t modules: Can select a group via module command :mod:\n"
3580 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003581#endif /* CONFIG_DYNAMIC_FTRACE */
3582#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003583 " set_ftrace_pid\t- Write pid(s) to restrict function tracing to\n"
3584 "\t\t those pids (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003585#endif
3586#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3587 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3588 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3589#endif
3590#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003591 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3592 "\t\t\t snapshot buffer. Read the contents for more\n"
3593 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003594#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003595#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003596 " stack_trace\t\t- Shows the max stack trace when active\n"
3597 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003598 "\t\t\t Write into this file to reset the max size (trigger a\n"
3599 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003600#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003601 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3602 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003603#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003604#endif /* CONFIG_STACK_TRACER */
Tom Zanussi26f25562014-01-17 15:11:44 -06003605 " events/\t\t- Directory containing all trace event subsystems:\n"
3606 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3607 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003608 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3609 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003610 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003611 " events/<system>/<event>/\t- Directory containing control files for\n"
3612 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003613 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3614 " filter\t\t- If set, only events passing filter are traced\n"
3615 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003616 "\t Format: <trigger>[:count][if <filter>]\n"
3617 "\t trigger: traceon, traceoff\n"
3618 "\t enable_event:<system>:<event>\n"
3619 "\t disable_event:<system>:<event>\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003620#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003621 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003622#endif
3623#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003624 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003625#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003626 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3627 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3628 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3629 "\t events/block/block_unplug/trigger\n"
3630 "\t The first disables tracing every time block_unplug is hit.\n"
3631 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3632 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3633 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3634 "\t Like function triggers, the counter is only decremented if it\n"
3635 "\t enabled or disabled tracing.\n"
3636 "\t To remove a trigger without a count:\n"
3637 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3638 "\t To remove a trigger with a count:\n"
3639 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3640 "\t Filters can be ignored when removing a trigger.\n"
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003641;

static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       readme_msg, strlen(readme_msg));
}

static const struct file_operations tracing_readme_fops = {
	.open = tracing_open_generic,
	.read = tracing_readme_read,
	.llseek = generic_file_llseek,
};

static ssize_t
tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
			    size_t cnt, loff_t *ppos)
{
	char *buf_comm;
	char *file_buf;
	char *buf;
	int len = 0;
	int pid;
	int i;

	file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
	if (!file_buf)
		return -ENOMEM;

	buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
	if (!buf_comm) {
		kfree(file_buf);
		return -ENOMEM;
	}

	buf = file_buf;

	for (i = 0; i < SAVED_CMDLINES; i++) {
		int r;

		pid = map_cmdline_to_pid[i];
		if (pid == -1 || pid == NO_CMDLINE_MAP)
			continue;

		trace_find_cmdline(pid, buf_comm);
		r = sprintf(buf, "%d %s\n", pid, buf_comm);
		buf += r;
		len += r;
	}

	len = simple_read_from_buffer(ubuf, cnt, ppos,
				      file_buf, len);

	kfree(file_buf);
	kfree(buf_comm);

	return len;
}

static const struct file_operations tracing_saved_cmdlines_fops = {
	.open = tracing_open_generic,
	.read = tracing_saved_cmdlines_read,
	.llseek = generic_file_llseek,
};

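/*
 * Illustrative sketch only: the loop above emits one "<pid> <comm>" pair
 * per saved cmdline, so reading the file (assuming tracefs is mounted at
 * /sys/kernel/debug/tracing) would look something like:
 *
 *	# cat /sys/kernel/debug/tracing/saved_cmdlines
 *	1 systemd
 *	1234 bash
 */
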
static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+2];
	int r;

	mutex_lock(&trace_types_lock);
	r = sprintf(buf, "%s\n", tr->current_trace->name);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

int tracer_init(struct tracer *t, struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
	return t->init(tr);
}

static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
{
	int cpu;

	for_each_tracing_cpu(cpu)
		per_cpu_ptr(buf->data, cpu)->entries = val;
}

#ifdef CONFIG_TRACER_MAX_TRACE
/* resize @trace_buf's buffer to the size of @size_buf's entries */
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id)
{
	int cpu, ret = 0;

	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			ret = ring_buffer_resize(trace_buf->buffer,
				per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
			if (ret < 0)
				break;
			per_cpu_ptr(trace_buf->data, cpu)->entries =
				per_cpu_ptr(size_buf->data, cpu)->entries;
		}
	} else {
		ret = ring_buffer_resize(trace_buf->buffer,
			per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
		if (ret == 0)
			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
				per_cpu_ptr(size_buf->data, cpu_id)->entries;
	}

	return ret;
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int __tracing_resize_ring_buffer(struct trace_array *tr,
					unsigned long size, int cpu)
{
	int ret;

	/*
	 * If the kernel or user changes the size of the ring buffer,
	 * we use the size that was given, and we can forget about
	 * expanding it later.
	 */
	ring_buffer_expanded = true;

	/* May be called before buffers are initialized */
	if (!tr->trace_buffer.buffer)
		return 0;

	ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
	if (ret < 0)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
	    !tr->current_trace->use_max_tr)
		goto out;

	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
	if (ret < 0) {
		int r = resize_buffer_duplicate_size(&tr->trace_buffer,
						     &tr->trace_buffer, cpu);
		if (r < 0) {
			/*
			 * AARGH! We are left with different sized
			 * max buffers!!!!
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the
			 * latency tracers), it swaps the max buffer
			 * with the saved snapshot. We succeeded in
			 * updating the size of the main buffer, but failed to
			 * update the size of the max buffer. But when we tried
			 * to reset the main buffer to the original size, we
			 * failed there too. This is very unlikely to
			 * happen, but if it does, warn and kill all
			 * tracing.
			 */
			WARN_ON(1);
			tracing_disabled = 1;
		}
		return ret;
	}

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->max_buffer, size);
	else
		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;

 out:
#endif /* CONFIG_TRACER_MAX_TRACE */

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->trace_buffer, size);
	else
		per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;

	return ret;
}

static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
					  unsigned long size, int cpu_id)
{
	int ret = size;

	mutex_lock(&trace_types_lock);

	if (cpu_id != RING_BUFFER_ALL_CPUS) {
		/* make sure this cpu is enabled in the mask */
		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
			ret = -EINVAL;
			goto out;
		}
	}

	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
	if (ret < 0)
		ret = -ENOMEM;

out:
	mutex_unlock(&trace_types_lock);

	return ret;
}


/**
 * tracing_update_buffers - used by tracing facility to expand ring buffers
 *
 * To save memory on systems where tracing is never used, the ring
 * buffers start out at a minimum size. Once a user starts to use the
 * tracing facility, they need to grow to their default size.
 *
 * This function is to be called when a tracer is about to be used.
 */
int tracing_update_buffers(void)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded)
		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
	mutex_unlock(&trace_types_lock);

	return ret;
}

struct trace_option_dentry;

static struct trace_option_dentry *
create_trace_option_files(struct trace_array *tr, struct tracer *tracer);

static void
destroy_trace_option_files(struct trace_option_dentry *topts);

static int tracing_set_tracer(const char *buf)
{
	static struct trace_option_dentry *topts;
	struct trace_array *tr = &global_trace;
	struct tracer *t;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool had_max_tr;
#endif
	int ret = 0;

	mutex_lock(&trace_types_lock);

	if (!ring_buffer_expanded) {
		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			goto out;
		ret = 0;
	}

	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (t == tr->current_trace)
		goto out;

	trace_branch_disable();

	tr->current_trace->enabled = false;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	/* Current trace needs to be nop_trace before synchronize_sched */
	tr->current_trace = &nop_trace;

#ifdef CONFIG_TRACER_MAX_TRACE
	had_max_tr = tr->allocated_snapshot;

	if (had_max_tr && !t->use_max_tr) {
		/*
		 * We need to make sure that the update_max_tr sees that
		 * current_trace changed to nop_trace to keep it from
		 * swapping the buffers after we resize it.
		 * The update_max_tr is called with interrupts disabled,
		 * so a synchronize_sched() is sufficient.
		 */
		synchronize_sched();
		free_snapshot(tr);
	}
#endif
	destroy_trace_option_files(topts);

	topts = create_trace_option_files(tr, t);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (t->use_max_tr && !had_max_tr) {
		ret = alloc_snapshot(tr);
		if (ret < 0)
			goto out;
	}
#endif

	if (t->init) {
		ret = tracer_init(t, tr);
		if (ret)
			goto out;
	}

	tr->current_trace = t;
	tr->current_trace->enabled = true;
	trace_branch_enable(tr);
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	char buf[MAX_TRACER_SIZE+1];
	int i;
	size_t ret;
	int err;

	ret = cnt;

	if (cnt > MAX_TRACER_SIZE)
		cnt = MAX_TRACER_SIZE;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip trailing whitespace */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	err = tracing_set_tracer(buf);
	if (err)
		return err;

	*ppos += ret;

	return ret;
}

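/*
 * Illustrative sketch only: writes to the current_tracer file land in
 * tracing_set_trace_write() -> tracing_set_tracer() above. Assuming
 * tracefs is mounted at /sys/kernel/debug/tracing:
 *
 *	# echo function_graph > /sys/kernel/debug/tracing/current_tracer
 *	# cat /sys/kernel/debug/tracing/current_tracer
 *	function_graph
 */
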
static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n",
		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	*ptr = val * 1000;

	return cnt;
}

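/*
 * Illustrative sketch only: the latency value is kept in nanoseconds
 * internally but exposed in microseconds, hence the nsecs_to_usecs()
 * in the read path and the "* 1000" in the write path. Assuming the
 * usual tracing_max_latency file under tracefs:
 *
 *	# cat /sys/kernel/debug/tracing/tracing_max_latency
 *	128
 *	# echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 */
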
static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	mutex_lock(&trace_types_lock);

	/* create a buffer to store the information to pass to userspace */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		ret = -ENOMEM;
		__trace_array_put(tr);
		goto out;
	}

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace) {
		ret = -ENOMEM;
		goto fail;
	}
	*iter->trace = *tr->current_trace;

	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto fail;
	}

	/* trace pipe does not show start of buffer */
	cpumask_setall(iter->started);

	if (trace_flags & TRACE_ITER_LATENCY_FMT)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	iter->tr = tr;
	iter->trace_buffer = &tr->trace_buffer;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);
	filp->private_data = iter;

	if (iter->trace->pipe_open)
		iter->trace->pipe_open(iter);

	nonseekable_open(inode, filp);
out:
	mutex_unlock(&trace_types_lock);
	return ret;

fail:
	kfree(iter->trace);
	kfree(iter);
	__trace_array_put(tr);
	mutex_unlock(&trace_types_lock);
	return ret;
}

static int tracing_release_pipe(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter = file->private_data;
	struct trace_array *tr = inode->i_private;

	mutex_lock(&trace_types_lock);

	if (iter->trace->pipe_close)
		iter->trace->pipe_close(iter);

	mutex_unlock(&trace_types_lock);

	free_cpumask_var(iter->started);
	mutex_destroy(&iter->mutex);
	kfree(iter->trace);
	kfree(iter);

	trace_array_put(tr);

	return 0;
}

static unsigned int
trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return POLLIN | POLLRDNORM;

	if (trace_flags & TRACE_ITER_BLOCK)
		/*
		 * Always select as readable when in blocking mode
		 */
		return POLLIN | POLLRDNORM;
	else
		return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
					     filp, poll_table);
}

static unsigned int
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
	struct trace_iterator *iter = filp->private_data;

	return trace_poll(iter, filp, poll_table);
}

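/*
 * Illustrative userspace sketch only (not kernel code): a reader would
 * typically use poll(2) on trace_pipe, which ends up in
 * tracing_poll_pipe() above. The path assumes tracefs is mounted at
 * /sys/kernel/debug/tracing.
 *
 *	#include <fcntl.h>
 *	#include <poll.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	char buf[4096];
 *	ssize_t n;
 *
 *	while (poll(&pfd, 1, -1) > 0) {
 *		n = read(fd, buf, sizeof(buf));
 *		if (n <= 0)
 *			break;
 *		write(STDOUT_FILENO, buf, n);
 *	}
 */
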
/*
 * This is a makeshift waitqueue.
 * A tracer might use this callback on some rare cases:
 *
 *  1) the current tracer might hold the runqueue lock when it wakes up
 *     a reader, hence a deadlock (sched, function, and function graph tracers)
 *  2) the function tracers trace all functions, and we don't want
 *     the overhead of calling wake_up and friends
 *     (and tracing them too)
 *
 * Anyway, this is a really very primitive wakeup.
 */
void poll_wait_pipe(struct trace_iterator *iter)
{
	set_current_state(TASK_INTERRUPTIBLE);
	/* sleep for 100 msecs, and try again. */
	schedule_timeout(HZ / 10);
}

/* Must be called with trace_types_lock mutex held. */
static int tracing_wait_pipe(struct file *filp)
{
	struct trace_iterator *iter = filp->private_data;

	while (trace_empty(iter)) {

		if ((filp->f_flags & O_NONBLOCK)) {
			return -EAGAIN;
		}

		mutex_unlock(&iter->mutex);

		iter->trace->wait_pipe(iter);

		mutex_lock(&iter->mutex);

		if (signal_pending(current))
			return -EINTR;

		/*
		 * We block until we read something and tracing is disabled.
		 * We still block if tracing is disabled, but we have never
		 * read anything. This allows a user to cat this file, and
		 * then enable tracing. But after we have read something,
		 * we give an EOF when tracing is again disabled.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracing_is_on() && iter->pos)
			break;
	}

	return 1;
}

/*
 * Consumer reader.
 */
static ssize_t
tracing_read_pipe(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_iterator *iter = filp->private_data;
	struct trace_array *tr = iter->tr;
	ssize_t sret;

	/* return any leftover data */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (sret != -EBUSY)
		return sret;

	trace_seq_init(&iter->seq);

	/* copy the tracer to avoid using a global lock all around */
	mutex_lock(&trace_types_lock);
	if (unlikely(iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

	/*
	 * Avoid more than one consumer on a single file descriptor.
	 * This is just a matter of traces coherency, the ring buffer itself
	 * is protected.
	 */
	mutex_lock(&iter->mutex);
	if (iter->trace->read) {
		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
		if (sret)
			goto out;
	}

waitagain:
	sret = tracing_wait_pipe(filp);
	if (sret <= 0)
		goto out;

	/* stop when tracing is finished */
	if (trace_empty(iter)) {
		sret = 0;
		goto out;
	}

	if (cnt >= PAGE_SIZE)
		cnt = PAGE_SIZE - 1;

	/* reset all but tr, trace, and overruns */
	memset(&iter->seq, 0,
	       sizeof(struct trace_iterator) -
	       offsetof(struct trace_iterator, seq));
	cpumask_clear(iter->started);
	iter->pos = -1;

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);
	while (trace_find_next_entry_inc(iter) != NULL) {
		enum print_line_t ret;
		int len = iter->seq.len;

		ret = print_trace_line(iter);
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			/* don't print partial lines */
			iter->seq.len = len;
			break;
		}
		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);

		if (iter->seq.len >= cnt)
			break;

		/*
		 * Setting the full flag means we reached the trace_seq buffer
		 * size and we should leave by partial output condition above.
		 * One of the trace_seq_* functions is not used properly.
		 */
		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
			  iter->ent->type);
	}
	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();

	/* Now copy what we have to the user */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (iter->seq.readpos >= iter->seq.len)
		trace_seq_init(&iter->seq);

	/*
	 * If there was nothing to send to user, in spite of consuming trace
	 * entries, go back to wait for more entries.
	 */
	if (sret == -EBUSY)
		goto waitagain;

out:
	mutex_unlock(&iter->mutex);

	return sret;
}

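/*
 * Illustrative sketch only: trace_pipe is a consuming reader, so unlike
 * the 'trace' file, events read here are removed from the ring buffer.
 * Assuming tracefs is mounted at /sys/kernel/debug/tracing:
 *
 *	# cat /sys/kernel/debug/tracing/trace_pipe
 *
 * blocks until events arrive, prints them, and consumes them as it goes.
 */
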
static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
				     unsigned int idx)
{
	__free_page(spd->pages[idx]);
}

static const struct pipe_buf_operations tracing_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = generic_pipe_buf_release,
	.steal = generic_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
	size_t count;
	int ret;

	/* Seq buffer is page-sized, exactly what we need. */
	for (;;) {
		count = iter->seq.len;
		ret = print_trace_line(iter);
		count = iter->seq.len - count;
		if (rem < count) {
			rem = 0;
			iter->seq.len -= count;
			break;
		}
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			iter->seq.len -= count;
			break;
		}

		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);
		rem -= count;
		if (!trace_find_next_entry_inc(iter)) {
			rem = 0;
			iter->ent = NULL;
			break;
		}
	}

	return rem;
}

static ssize_t tracing_splice_read_pipe(struct file *filp,
					loff_t *ppos,
					struct pipe_inode_info *pipe,
					size_t len,
					unsigned int flags)
{
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct trace_iterator *iter = filp->private_data;
	struct splice_pipe_desc spd = {
		.pages = pages_def,
		.partial = partial_def,
		.nr_pages = 0, /* This gets updated below. */
		.nr_pages_max = PIPE_DEF_BUFFERS,
		.flags = flags,
		.ops = &tracing_pipe_buf_ops,
		.spd_release = tracing_spd_release_pipe,
	};
	struct trace_array *tr = iter->tr;
	ssize_t ret;
	size_t rem;
	unsigned int i;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	/* copy the tracer to avoid using a global lock all around */
	mutex_lock(&trace_types_lock);
	if (unlikely(iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

	mutex_lock(&iter->mutex);

	if (iter->trace->splice_read) {
		ret = iter->trace->splice_read(iter, filp,
					       ppos, pipe, len, flags);
		if (ret)
			goto out_err;
	}

	ret = tracing_wait_pipe(filp);
	if (ret <= 0)
		goto out_err;

	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
		ret = -EFAULT;
		goto out_err;
	}

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);

	/* Fill as many pages as possible. */
	for (i = 0, rem = len; i < pipe->buffers && rem; i++) {
		spd.pages[i] = alloc_page(GFP_KERNEL);
		if (!spd.pages[i])
			break;

		rem = tracing_fill_pipe_page(rem, iter);

		/* Copy the data into the page, so we can start over. */
		ret = trace_seq_to_buffer(&iter->seq,
					  page_address(spd.pages[i]),
					  iter->seq.len);
		if (ret < 0) {
			__free_page(spd.pages[i]);
			break;
		}
		spd.partial[i].offset = 0;
		spd.partial[i].len = iter->seq.len;

		trace_seq_init(&iter->seq);
	}

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
	mutex_unlock(&iter->mutex);

	spd.nr_pages = i;

	ret = splice_to_pipe(pipe, &spd);
out:
	splice_shrink_spd(&spd);
	return ret;

out_err:
	mutex_unlock(&iter->mutex);
	goto out;
}

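/*
 * Illustrative userspace sketch only (not kernel code): trace data can
 * be moved without extra copies by splicing trace_pipe into a pipe and
 * then into a file. Paths and the 65536 chunk size are examples.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int p[2];
 *	int in = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);
 *	int out = open("trace.dat", O_WRONLY | O_CREAT, 0644);
 *	ssize_t n;
 *
 *	pipe(p);
 *	while ((n = splice(in, NULL, p[1], NULL, 65536, 0)) > 0)
 *		splice(p[0], NULL, out, NULL, n, 0);
 */
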
static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	int cpu = tracing_get_cpu(inode);
	char buf[64];
	int r = 0;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

	if (cpu == RING_BUFFER_ALL_CPUS) {
		int cpu, buf_size_same;
		unsigned long size;

		size = 0;
		buf_size_same = 1;
		/* check if all cpu sizes are the same */
		for_each_tracing_cpu(cpu) {
			/* fill in the size from the first enabled cpu */
			if (size == 0)
				size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
			if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
				buf_size_same = 0;
				break;
			}
		}

		if (buf_size_same) {
			if (!ring_buffer_expanded)
				r = sprintf(buf, "%lu (expanded: %lu)\n",
					    size >> 10,
					    trace_buf_size >> 10);
			else
				r = sprintf(buf, "%lu\n", size >> 10);
		} else
			r = sprintf(buf, "X\n");
	} else
		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);

	mutex_unlock(&trace_types_lock);

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
	return ret;
}

static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry */
	if (!val)
		return -EINVAL;

	/* value is in KB */
	val <<= 10;
	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

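/*
 * Illustrative sketch only: buffer_size_kb is read and written in
 * kilobytes per cpu; the top-level file resizes all cpus, while the
 * per_cpu/cpuN/ copies resize a single cpu. Assuming tracefs at
 * /sys/kernel/debug/tracing:
 *
 *	# echo 4096 > /sys/kernel/debug/tracing/buffer_size_kb
 *	# cat /sys/kernel/debug/tracing/per_cpu/cpu0/buffer_size_kb
 *	4096
 */
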
static ssize_t
tracing_total_entries_read(struct file *filp, char __user *ubuf,
			   size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r, cpu;
	unsigned long size = 0, expanded_size = 0;

	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
		size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
		if (!ring_buffer_expanded)
			expanded_size += trace_buf_size >> 10;
	}
	if (ring_buffer_expanded)
		r = sprintf(buf, "%lu\n", size);
	else
		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	/*
	 * There is no need to read what the user has written; this function
	 * exists only so that an "echo" into the file does not return an
	 * error.
	 */

	*ppos += cnt;

	return cnt;
}

static int
tracing_free_buffer_release(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	/* disable tracing? */
	if (trace_flags & TRACE_ITER_STOP_ON_FREE)
		tracer_tracing_off(tr);
	/* resize the ring buffer to 0 */
	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);

	trace_array_put(tr);

	return 0;
}

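/*
 * Illustrative sketch only: closing the free_buffer file shrinks the
 * ring buffer to zero (and, with the stop-on-free option set, turns
 * tracing off first), so a shell one-liner is enough to free the
 * buffers, assuming tracefs at /sys/kernel/debug/tracing:
 *
 *	# echo > /sys/kernel/debug/tracing/free_buffer
 */
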
static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *fpos)
{
	unsigned long addr = (unsigned long)ubuf;
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	struct page *pages[2];
	void *map_page[2];
	int nr_pages = 1;
	ssize_t written;
	int offset;
	int size;
	int len;
	int ret;
	int i;

	if (tracing_disabled)
		return -EINVAL;

	if (!(trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	/*
	 * Userspace is injecting traces into the kernel trace buffer.
	 * We want to be as non-intrusive as possible.
	 * To do so, we do not want to allocate any special buffers
	 * or take any locks, but instead write the userspace data
	 * straight into the ring buffer.
	 *
	 * First we need to pin the userspace buffer into memory,
	 * which most likely it already is, because it was just referenced.
	 * But there's no guarantee that it is. By using get_user_pages_fast()
	 * and kmap_atomic/kunmap_atomic() we can get access to the
	 * pages directly. We then write the data directly into the
	 * ring buffer.
	 */
	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	/* check if we cross pages */
	if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
		nr_pages = 2;

	offset = addr & (PAGE_SIZE - 1);
	addr &= PAGE_MASK;

	ret = get_user_pages_fast(addr, nr_pages, 0, pages);
	if (ret < nr_pages) {
		while (--ret >= 0)
			put_page(pages[ret]);
		written = -EFAULT;
		goto out;
	}

	for (i = 0; i < nr_pages; i++)
		map_page[i] = kmap_atomic(pages[i]);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt + 2; /* possible \n added */
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  irq_flags, preempt_count());
	if (!event) {
		/* Ring buffer disabled, return as if not open for write */
		written = -EBADF;
		goto out_unlock;
	}

	entry = ring_buffer_event_data(event);
	entry->ip = _THIS_IP_;

	if (nr_pages == 2) {
		len = PAGE_SIZE - offset;
		memcpy(&entry->buf, map_page[0] + offset, len);
		memcpy(&entry->buf[len], map_page[1], cnt - len);
	} else
		memcpy(&entry->buf, map_page[0] + offset, cnt);

	if (entry->buf[cnt - 1] != '\n') {
		entry->buf[cnt] = '\n';
		entry->buf[cnt + 1] = '\0';
	} else
		entry->buf[cnt] = '\0';

	__buffer_unlock_commit(buffer, event);

	written = cnt;

	*fpos += written;

 out_unlock:
	for (i = 0; i < nr_pages; i++) {
		kunmap_atomic(map_page[i]);
		put_page(pages[i]);
	}
 out:
	return written;
}

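/*
 * Illustrative sketch only: trace_marker lets userspace annotate the
 * trace; the write lands in the ring buffer as a print entry via
 * tracing_mark_write() above. Assuming tracefs at
 * /sys/kernel/debug/tracing:
 *
 *	# echo "about to start the benchmark" > \
 *		/sys/kernel/debug/tracing/trace_marker
 */
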
static int tracing_clock_show(struct seq_file *m, void *v)
{
	struct trace_array *tr = m->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
		seq_printf(m,
			"%s%s%s%s", i ? " " : "",
			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
			i == tr->clock_id ? "]" : "");
	seq_putc(m, '\n');

	return 0;
}

static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *fpos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	const char *clockstr;
	int i;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	clockstr = strstrip(buf);

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
		if (strcmp(trace_clocks[i].name, clockstr) == 0)
			break;
	}
	if (i == ARRAY_SIZE(trace_clocks))
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	tr->clock_id = i;

	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);

	/*
	 * New clock may not be consistent with the previous clock.
	 * Reset the buffer so that it doesn't have incomparable timestamps.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
	tracing_reset_online_cpus(&tr->max_buffer);
#endif

	mutex_unlock(&trace_types_lock);

	*fpos += cnt;

	return cnt;
}

static int tracing_clock_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr))
		return -ENODEV;

	ret = single_open(file, tracing_clock_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

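/*
 * Illustrative sketch only: trace_clock shows the available clocks with
 * the current one in brackets; writing a name switches clocks and, per
 * tracing_clock_write() above, resets the buffers. The exact clock list
 * varies by kernel and architecture; assuming tracefs at
 * /sys/kernel/debug/tracing, the exchange looks something like:
 *
 *	# cat /sys/kernel/debug/tracing/trace_clock
 *	[local] global counter uptime perf
 *	# echo global > /sys/kernel/debug/tracing/trace_clock
 */
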
struct ftrace_buffer_info {
	struct trace_iterator iter;
	void *spare;
	unsigned int read;
};

#ifdef CONFIG_TRACER_SNAPSHOT
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		ret = -ENOMEM;
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
			goto out;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
			goto out;
		}
		ret = 0;

		iter->tr = tr;
		iter->trace_buffer = &tr->max_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		m->private = iter;
		file->private_data = m;
	}
out:
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	switch (val) {
	case 0:
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (!tr->allocated_snapshot) {
			ret = alloc_snapshot(tr);
			if (ret < 0)
				break;
		}
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id());
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}

4892static int tracing_snapshot_release(struct inode *inode, struct file *file)
4893{
4894 struct seq_file *m = file->private_data;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004895 int ret;
4896
4897 ret = tracing_release(inode, file);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004898
4899 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004900 return ret;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004901
4902 /* If write only, the seq_file is just a stub */
4903 if (m)
4904 kfree(m->private);
4905 kfree(m);
4906
4907 return 0;
4908}
4909
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05004910static int tracing_buffers_open(struct inode *inode, struct file *filp);
4911static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
4912 size_t count, loff_t *ppos);
4913static int tracing_buffers_release(struct inode *inode, struct file *file);
4914static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
4915 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
4916
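/*
 * "snapshot_raw" is the snapshot counterpart of trace_pipe_raw: it
 * reuses the buffer-file machinery above but points the iterator at
 * the max (snapshot) buffer instead of the live one.
 */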
static int snapshot_raw_open(struct inode *inode, struct file *filp)
{
	struct ftrace_buffer_info *info;
	int ret;

	ret = tracing_buffers_open(inode, filp);
	if (ret < 0)
		return ret;

	info = filp->private_data;

	if (info->iter.trace->use_max_tr) {
		tracing_buffers_release(inode, filp);
		return -EBUSY;
	}

	info->iter.snapshot = true;
	info->iter.trace_buffer = &info->iter.tr->max_buffer;

	return ret;
}

#endif /* CONFIG_TRACER_SNAPSHOT */


static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};

static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_free_buffer_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};

static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_clock_write,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};

static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */

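/*
 * Open one of the per-cpu trace_pipe_raw files. Each reader gets its
 * own ftrace_buffer_info; taking a trace_array reference keeps the
 * instance from being removed while the file is open.
 */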
static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct ftrace_buffer_info *info;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	mutex_lock(&trace_types_lock);

	info->iter.tr = tr;
	info->iter.cpu_file = tracing_get_cpu(inode);
	info->iter.trace = tr->current_trace;
	info->iter.trace_buffer = &tr->trace_buffer;
	info->spare = NULL;
	/* Force reading ring buffer for first read */
	info->read = (unsigned int)-1;

	filp->private_data = info;

	mutex_unlock(&trace_types_lock);

	ret = nonseekable_open(inode, filp);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static unsigned int
tracing_buffers_poll(struct file *filp, poll_table *poll_table)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;

	return trace_poll(iter, filp, poll_table);
}

static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;
	ssize_t ret;
	ssize_t size;

	if (!count)
		return 0;

	mutex_lock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
		size = -EBUSY;
		goto out_unlock;
	}
#endif

	if (!info->spare)
		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
							  iter->cpu_file);
	size = -ENOMEM;
	if (!info->spare)
		goto out_unlock;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

 again:
	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
				    &info->spare,
				    count,
				    iter->cpu_file, 0);
	trace_access_unlock(iter->cpu_file);

	if (ret < 0) {
		if (trace_empty(iter)) {
			if ((filp->f_flags & O_NONBLOCK)) {
				size = -EAGAIN;
				goto out_unlock;
			}
			mutex_unlock(&trace_types_lock);
			iter->trace->wait_pipe(iter);
			mutex_lock(&trace_types_lock);
			if (signal_pending(current)) {
				size = -EINTR;
				goto out_unlock;
			}
			goto again;
		}
		size = 0;
		goto out_unlock;
	}

	info->read = 0;
 read:
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size) {
		size = -EFAULT;
		goto out_unlock;
	}
	size -= ret;

	*ppos += size;
	info->read += size;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return size;
}

static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	__trace_array_put(iter->tr);

	if (info->spare)
		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
	kfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}

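/*
 * A reference-counted handle on one ring-buffer page. Pages handed to
 * splice() stay alive until both the pipe buffer and the spd release
 * paths below have dropped their references.
 */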
struct buffer_ref {
	struct ring_buffer	*buffer;
	void			*page;
	int			ref;
};

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	buf->private = 0;
}

static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ref->ref++;
}

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge		= 0,
	.map			= generic_pipe_buf_map,
	.unmap			= generic_pipe_buf_unmap,
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,
};

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we error'ed out in filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	spd->partial[i].private = 0;
}

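/*
 * Zero-copy path for trace_pipe_raw: whole ring-buffer pages are
 * wrapped in buffer_refs and spliced into the pipe instead of being
 * copied through a user buffer. Both *ppos and len must be
 * page-aligned (len is rounded down if it covers at least one page).
 */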
static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, size, i;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}
#endif

	if (splice_grow_spd(pipe, &spd)) {
		ret = -ENOMEM;
		goto out;
	}

	if (*ppos & (PAGE_SIZE - 1)) {
		ret = -EINVAL;
		goto out;
	}

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE) {
			ret = -EINVAL;
			goto out;
		}
		len &= PAGE_MASK;
	}

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref)
			break;

		ref->ref = 1;
		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (!ref->page) {
			kfree(ref);
			break;
		}

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->page);
			kfree(ref);
			break;
		}

		/*
		 * zero out any left over data, this is going to
		 * user land.
		 */
		size = ring_buffer_page_len(ref->page);
		if (size < PAGE_SIZE)
			memset(ref->page + size, 0, PAGE_SIZE - size);

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
			ret = -EAGAIN;
			goto out;
		}
		mutex_unlock(&trace_types_lock);
		iter->trace->wait_pipe(iter);
		mutex_lock(&trace_types_lock);
		if (signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
	splice_shrink_spd(&spd);
out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

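/*
 * Per-cpu "stats" file. The output is one "name: value" pair per
 * line; with a nanosecond trace clock the timestamps are printed as
 * seconds.microseconds. Illustrative output (values made up):
 *
 *   entries: 129
 *   overrun: 0
 *   commit overrun: 0
 *   bytes: 5472
 *   oldest event ts:  2296.622218
 *   now ts:  2296.622656
 *   dropped events: 0
 *   read events: 129
 */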
static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
				 t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				 ring_buffer_time_stamp(trace_buf->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);

	kfree(s);

	return count;
}

static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

#ifdef CONFIG_DYNAMIC_FTRACE

int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
	int r;

	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
	buf[r++] = '\n';

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);

	return r;
}

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
#endif /* CONFIG_DYNAMIC_FTRACE */

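/*
 * The "snapshot" ftrace function command. Registered below via
 * register_snapshot_cmd(), it takes a snapshot every time a matched
 * function is hit, optionally limited to a count. Example usage
 * through set_ftrace_filter (function name illustrative):
 *
 *   echo 'schedule:snapshot' > set_ftrace_filter
 *   echo 'schedule:snapshot:3' > set_ftrace_filter
 *   echo '!schedule:snapshot' > set_ftrace_filter
 */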
#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	tracing_snapshot();
}

static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	unsigned long *count = (long *)data;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_snapshot();
}

static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	seq_printf(m, "snapshot");

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,
};

static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,
};

static int
ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	if (ret >= 0)
		alloc_snapshot(&global_trace);

	return ret < 0 ? ret : 0;
}

static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name			= "snapshot",
	.func			= ftrace_trace_snapshot_callback,
};

static __init int register_snapshot_cmd(void)
{
	return register_ftrace_command(&ftrace_snapshot_cmd);
}
#else
static inline __init int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */

struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
{
	if (tr->dir)
		return tr->dir;

	if (!debugfs_initialized())
		return NULL;

	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		tr->dir = debugfs_create_dir("tracing", NULL);

	if (!tr->dir)
		pr_warn_once("Could not create debugfs directory 'tracing'\n");

	return tr->dir;
}

struct dentry *tracing_init_dentry(void)
{
	return tracing_init_dentry_tr(&global_trace);
}

static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
	struct dentry *d_tracer;

	if (tr->percpu_dir)
		return tr->percpu_dir;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);

	WARN_ONCE(!tr->percpu_dir,
		  "Could not create debugfs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;
}

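/*
 * Per-cpu files need to know which CPU they belong to. The CPU number
 * is stashed in the inode's i_cdev field, offset by one so that a
 * NULL i_cdev (the default for the top-level files) can stand for
 * RING_BUFFER_ALL_CPUS; tracing_get_cpu() undoes the offset.
 */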
static struct dentry *
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
		      void *data, long cpu, const struct file_operations *fops)
{
	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);

	if (ret) /* See tracing_get_cpu() */
		ret->d_inode->i_cdev = (void *)(cpu + 1);
	return ret;
}

static void
tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
			      tr, cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_cpu_file("trace", 0644, d_cpu,
			      tr, cpu, &tracing_fops);

	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
			      tr, cpu, &tracing_buffers_fops);

	trace_create_cpu_file("stats", 0444, d_cpu,
			      tr, cpu, &tracing_stats_fops);

	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
			      tr, cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_cpu_file("snapshot", 0644, d_cpu,
			      tr, cpu, &snapshot_fops);

	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
			      tr, cpu, &snapshot_raw_fops);
#endif
}

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

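/*
 * Tracer-specific options (tracer->flags->opts) are exposed as one
 * debugfs file each under options/; a trace_option_dentry ties the
 * file back to the flag bit it toggles.
 */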
struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};

static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr->current_trace, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}


static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
	.llseek	= generic_file_llseek,
};

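/*
 * The core trace_flags options (the entries of the top-level
 * trace_options file) also get one file each under options/; here the
 * file's private_data is simply the bit index into trace_flags.
 */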
static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	long index = (long)filp->private_data;
	char *buf;

	if (trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	struct trace_array *tr = &global_trace;
	long index = (long)filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek = generic_file_llseek,
};

struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = debugfs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warning("Could not create debugfs '%s' entry\n", name);

	return ret;
}


static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->options = debugfs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warning("Could not create debugfs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}

static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);

}

static struct trace_option_dentry *
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;

	if (!tracer)
		return NULL;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return NULL;

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return NULL;

	for (cnt = 0; opts[cnt].name; cnt++)
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);

	return topts;
}

static void
destroy_trace_option_files(struct trace_option_dentry *topts)
{
	int cnt;

	if (!topts)
		return;

	for (cnt = 0; topts[cnt].opt; cnt++) {
		if (topts[cnt].entry)
			debugfs_remove(topts[cnt].entry);
	}

	kfree(topts);
}

static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options, (void *)index,
				 &trace_options_core_fops);
}

static __init void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++)
		create_trace_option_core_file(tr, trace_options[i], i);
}

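/*
 * rb_simple_fops back the "tracing_on" file: writing 0 stops
 * recording into the ring buffer (and calls the tracer's stop hook),
 * writing 1 starts it again. Reading returns the current state.
 */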
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic_tr,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};

struct dentry *trace_instance_dir;

static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);

static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}

static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		ring_buffer_free(tr->trace_buffer.buffer);
		free_percpu(tr->trace_buffer.data);
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}

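/*
 * Trace instances are created and removed from user space with plain
 * mkdir/rmdir in the instances directory, e.g. (path assuming debugfs
 * is mounted at /sys/kernel/debug):
 *
 *   mkdir /sys/kernel/debug/tracing/instances/foo
 *   rmdir /sys/kernel/debug/tracing/instances/foo
 *
 * new_instance_create() below does the actual work for mkdir.
 */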
6022static int new_instance_create(const char *name)
6023{
Steven Rostedt277ba042012-08-03 16:10:49 -04006024 struct trace_array *tr;
6025 int ret;
Steven Rostedt277ba042012-08-03 16:10:49 -04006026
6027 mutex_lock(&trace_types_lock);
6028
6029 ret = -EEXIST;
6030 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6031 if (tr->name && strcmp(tr->name, name) == 0)
6032 goto out_unlock;
6033 }
6034
6035 ret = -ENOMEM;
6036 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
6037 if (!tr)
6038 goto out_unlock;
6039
6040 tr->name = kstrdup(name, GFP_KERNEL);
6041 if (!tr->name)
6042 goto out_free_tr;
6043
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07006044 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6045 goto out_free_tr;
6046
6047 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6048
Steven Rostedt277ba042012-08-03 16:10:49 -04006049 raw_spin_lock_init(&tr->start_lock);
6050
6051 tr->current_trace = &nop_trace;
6052
6053 INIT_LIST_HEAD(&tr->systems);
6054 INIT_LIST_HEAD(&tr->events);
6055
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006056 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
Steven Rostedt277ba042012-08-03 16:10:49 -04006057 goto out_free_tr;
6058
Steven Rostedt277ba042012-08-03 16:10:49 -04006059 tr->dir = debugfs_create_dir(name, trace_instance_dir);
6060 if (!tr->dir)
6061 goto out_free_tr;
6062
6063 ret = event_trace_add_tracer(tr->dir, tr);
Alexander Z Lam609e85a2013-07-10 17:34:34 -07006064 if (ret) {
6065 debugfs_remove_recursive(tr->dir);
Steven Rostedt277ba042012-08-03 16:10:49 -04006066 goto out_free_tr;
Alexander Z Lam609e85a2013-07-10 17:34:34 -07006067 }
Steven Rostedt277ba042012-08-03 16:10:49 -04006068
6069 init_tracer_debugfs(tr, tr->dir);
6070
6071 list_add(&tr->list, &ftrace_trace_arrays);
6072
6073 mutex_unlock(&trace_types_lock);
6074
6075 return 0;
6076
6077 out_free_tr:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006078 if (tr->trace_buffer.buffer)
6079 ring_buffer_free(tr->trace_buffer.buffer);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07006080 free_cpumask_var(tr->tracing_cpumask);
Steven Rostedt277ba042012-08-03 16:10:49 -04006081 kfree(tr->name);
6082 kfree(tr);
6083
6084 out_unlock:
6085 mutex_unlock(&trace_types_lock);
6086
6087 return ret;
6088
6089}
6090
Steven Rostedt0c8916c2012-08-07 16:14:16 -04006091static int instance_delete(const char *name)
6092{
6093 struct trace_array *tr;
6094 int found = 0;
6095 int ret;
6096
6097 mutex_lock(&trace_types_lock);
6098
6099 ret = -ENODEV;
6100 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6101 if (tr->name && strcmp(tr->name, name) == 0) {
6102 found = 1;
6103 break;
6104 }
6105 }
6106 if (!found)
6107 goto out_unlock;
6108
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05006109 ret = -EBUSY;
6110 if (tr->ref)
6111 goto out_unlock;
6112
Steven Rostedt0c8916c2012-08-07 16:14:16 -04006113 list_del(&tr->list);
6114
6115 event_trace_del_tracer(tr);
6116 debugfs_remove_recursive(tr->dir);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006117 free_percpu(tr->trace_buffer.data);
6118 ring_buffer_free(tr->trace_buffer.buffer);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04006119
6120 kfree(tr->name);
6121 kfree(tr);
6122
6123 ret = 0;
6124
6125 out_unlock:
6126 mutex_unlock(&trace_types_lock);
6127
6128 return ret;
6129}
6130
Steven Rostedt277ba042012-08-03 16:10:49 -04006131static int instance_mkdir (struct inode *inode, struct dentry *dentry, umode_t mode)
6132{
6133 struct dentry *parent;
6134 int ret;
6135
6136 /* Paranoid: Make sure the parent is the "instances" directory */
6137 parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
6138 if (WARN_ON_ONCE(parent != trace_instance_dir))
6139 return -ENOENT;
6140
6141 /*
6142 * The inode mutex is locked, but debugfs_create_dir() will also
6143 * take the mutex. As the instances directory can not be destroyed
6144 * or changed in any other way, it is safe to unlock it, and
6145 * let the dentry try. If two users try to make the same dir at
6146 * the same time, then the new_instance_create() will determine the
6147 * winner.
6148 */
6149 mutex_unlock(&inode->i_mutex);
6150
6151 ret = new_instance_create(dentry->d_iname);
6152
6153 mutex_lock(&inode->i_mutex);
6154
6155 return ret;
6156}
6157
static int instance_rmdir(struct inode *inode, struct dentry *dentry)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/* The caller did a dget() on dentry */
	mutex_unlock(&dentry->d_inode->i_mutex);

	/*
	 * The inode mutex is locked, but debugfs_remove_recursive() will
	 * also take the mutex. As the instances directory cannot be
	 * destroyed or changed in any other way, it is safe to unlock it,
	 * and let the dentry try. If two users try to remove the same dir
	 * at the same time, then instance_delete() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = instance_delete(dentry->d_iname);

	mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
	mutex_lock(&dentry->d_inode->i_mutex);

	return ret;
}

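/*
 * debugfs has no native support for userspace-driven mkdir/rmdir;
 * these inode operations get patched onto the "instances" directory
 * below so that plain mkdir(2) and rmdir(2) create and delete
 * tracing instances.
 */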
static const struct inode_operations instance_dir_inode_operations = {
	.lookup		= simple_lookup,
	.mkdir		= instance_mkdir,
	.rmdir		= instance_rmdir,
};

static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = debugfs_create_dir("instances", d_tracer);
	if (WARN_ON(!trace_instance_dir))
		return;

	/* Hijack the dir inode operations, to allow mkdir */
	trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
}

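/*
 * Create the control files that every trace_array gets, whether it is
 * the global array or an instance: trace, trace_pipe, buffer_size_kb,
 * trace_marker, trace_clock, tracing_on, and friends, plus the
 * per-cpu directories.
 */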
static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_debugfs_percpu(tr, cpu);
}

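/*
 * Top-level debugfs setup: the per-array files for the global trace
 * array via init_tracer_debugfs(), the files that exist only once
 * (available_tracers, current_tracer, README, ...), the "instances"
 * directory, and the trace options directory.
 */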
static __init int tracer_init_debugfs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	init_tracer_debugfs(&global_trace, d_tracer);

	trace_create_file("available_tracers", 0444, d_tracer,
			  &global_trace, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  &global_trace, &set_tracer_fops);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			  &tracing_max_latency, &tracing_max_lat_fops);
#endif

	trace_create_file("tracing_thresh", 0644, d_tracer,
			  &tracing_thresh, &tracing_max_lat_fops);

	trace_create_file("README", 0444, d_tracer,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			  NULL, &tracing_saved_cmdlines_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			  &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	create_trace_options_dir(&global_trace);

	return 0;
}

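/*
 * Panic and die notifiers: when the kernel oopses or panics and the
 * user booted with ftrace_dump_on_oops (or enabled the corresponding
 * sysctl), dump whatever is left in the ring buffer to the console so
 * the trace leading up to the crash survives.
 */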
static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call  = trace_panic_handler,
	.next           = NULL,
	.priority       = 150   /* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};

/*
 * printk() is limited to 1024 bytes anyway, and we really don't need
 * lines that big: nothing should be printing 1000 characters in a
 * single trace line.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define KERN_TRACE here so that there is a single place to change
 * the log level the ftrace dump is printed at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->len >= TRACE_MAX_PRINT)
		s->len = TRACE_MAX_PRINT;

	/* Should be NUL-terminated already, but we are paranoid. */
	s->buffer[s->len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

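/*
 * Set up an iterator over the global trace buffer by hand, without
 * going through a file open.  Used by ftrace_dump() below and by
 * in-kernel debuggers that need to walk the buffer directly.
 */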
void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}

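/*
 * Dump the entire ring buffer to the console.  Only one dumper may
 * run at a time (dump_running), tracing is turned off first, and the
 * per-cpu "disabled" counters are bumped so nothing writes into the
 * buffer while it is being read out.
 */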
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
	}

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUs to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We read everything we can,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);

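/*
 * Called from early_initcall(): allocate the cpumasks and ring
 * buffers for the global trace array, hook up the panic and die
 * notifiers, and apply any trace options passed on the command line.
 */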
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* Used for event triggers */
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_free_cpumask;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_temp_buffer;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	trace_init_cmdlines();

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

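	/*
	 * Apply options handed over on the kernel command line via the
	 * trace_options= boot parameter (a comma-separated list), e.g.
	 * booting with trace_options=stacktrace.  (Example value is
	 * illustrative; see Documentation/kernel-parameters.txt.)
	 */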
	while (trace_boot_options) {
		char *option;

		option = strsep(&trace_boot_options, ",");
		trace_set_options(&global_trace, option);
	}

	register_snapshot_cmd();

	return 0;

out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_free_cpumask:
	free_percpu(global_trace.trace_buffer.data);
#ifdef CONFIG_TRACER_MAX_TRACE
	free_percpu(global_trace.max_buffer.data);
#endif
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The name of the default boot-up tracer lives in an init
	 * section that is about to be freed.  This function runs as a
	 * late_initcall: if the boot tracer never got registered by
	 * then, clear the pointer so that a later registration does
	 * not dereference freed memory.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

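/*
 * Initcall ordering matters here: the buffers must exist before
 * debugfs exposes them, and the boot tracer pointer is cleared only
 * after every built-in tracer has had a chance to register.
 */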
early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);