blob: 815c878f409bd94e08777d1b9f83b1553f4a2e24 [file] [log] [blame]
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001/*
2 * ring buffer based function tracer
3 *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
Nadia Yvette Chambers6d49e352012-12-06 10:39:54 +010012 * Copyright (C) 2004 Nadia Yvette Chambers
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020013 */
Steven Rostedt2cadf912008-12-01 22:20:19 -050014#include <linux/ring_buffer.h>
Sam Ravnborg273b2812009-10-18 00:52:28 +020015#include <generated/utsrelease.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050016#include <linux/stacktrace.h>
17#include <linux/writeback.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020018#include <linux/kallsyms.h>
19#include <linux/seq_file.h>
Steven Rostedt3f5a54e2008-07-30 22:36:46 -040020#include <linux/notifier.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050021#include <linux/irqflags.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020022#include <linux/debugfs.h>
Steven Rostedt4c11d7a2008-05-12 21:20:43 +020023#include <linux/pagemap.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020024#include <linux/hardirq.h>
25#include <linux/linkage.h>
26#include <linux/uaccess.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050027#include <linux/kprobes.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020028#include <linux/ftrace.h>
29#include <linux/module.h>
30#include <linux/percpu.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050031#include <linux/splice.h>
Steven Rostedt3f5a54e2008-07-30 22:36:46 -040032#include <linux/kdebug.h>
Frederic Weisbecker5f0c6c02009-03-27 14:22:10 +010033#include <linux/string.h>
Lai Jiangshan7e53bd42010-01-06 20:08:50 +080034#include <linux/rwsem.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090035#include <linux/slab.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020036#include <linux/ctype.h>
37#include <linux/init.h>
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +020038#include <linux/poll.h>
Steven Rostedtb892e5c2012-03-01 22:06:48 -050039#include <linux/nmi.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020040#include <linux/fs.h>
Clark Williams8bd75c72013-02-07 09:47:07 -060041#include <linux/sched/rt.h>
Ingo Molnar86387f72008-05-12 21:20:51 +020042
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020043#include "trace.h"
Steven Rostedtf0868d12008-12-23 23:24:12 -050044#include "trace_output.h"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020045
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +010046/*
Steven Rostedt73c51622009-03-11 13:42:01 -040047 * On boot up, the ring buffer is set to the minimum size, so that
48 * we do not waste memory on systems that are not using tracing.
49 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -050050bool ring_buffer_expanded;
Steven Rostedt73c51622009-03-11 13:42:01 -040051
52/*
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +010053 * We need to change this state when a selftest is running.
Frederic Weisbeckerff325042008-12-04 23:47:35 +010054 * A selftest will lurk into the ring-buffer to count the
55 * entries inserted during the selftest although some concurrent
Ingo Molnar5e1607a2009-03-05 10:24:48 +010056 * insertions into the ring-buffer such as trace_printk could occurred
Frederic Weisbeckerff325042008-12-04 23:47:35 +010057 * at the same time, giving false positive or negative results.
58 */
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +010059static bool __read_mostly tracing_selftest_running;
Frederic Weisbeckerff325042008-12-04 23:47:35 +010060
Steven Rostedtb2821ae2009-02-02 21:38:32 -050061/*
62 * If a tracer is running, we do not want to run SELFTEST.
63 */
Li Zefan020e5f82009-07-01 10:47:05 +080064bool __read_mostly tracing_selftest_disabled;
Steven Rostedtb2821ae2009-02-02 21:38:32 -050065
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +010066/* For tracers that don't implement custom flags */
67static struct tracer_opt dummy_tracer_opt[] = {
68 { }
69};
70
71static struct tracer_flags dummy_tracer_flags = {
72 .val = 0,
73 .opts = dummy_tracer_opt
74};
75
76static int dummy_set_flag(u32 old_flags, u32 bit, int set)
77{
78 return 0;
79}
Steven Rostedt0f048702008-11-05 16:05:44 -050080
81/*
Steven Rostedt7ffbd482012-10-11 12:14:25 -040082 * To prevent the comm cache from being overwritten when no
83 * tracing is active, only save the comm when a trace event
84 * occurred.
85 */
86static DEFINE_PER_CPU(bool, trace_cmdline_save);
87
88/*
Steven Rostedt0f048702008-11-05 16:05:44 -050089 * Kill all tracing for good (never come back).
90 * It is initialized to 1 but will turn to zero if the initialization
91 * of the tracer is successful. But that is the only place that sets
92 * this back to zero.
93 */
Hannes Eder4fd27352009-02-10 19:44:12 +010094static int tracing_disabled = 1;
Steven Rostedt0f048702008-11-05 16:05:44 -050095
Christoph Lameter9288f992009-10-07 19:17:45 -040096DEFINE_PER_CPU(int, ftrace_cpu_disabled);
Steven Rostedtd7690412008-10-01 00:29:53 -040097
Jason Wessel955b61e2010-08-05 09:22:23 -050098cpumask_var_t __read_mostly tracing_buffer_mask;
Steven Rostedtab464282008-05-12 21:21:00 +020099
Steven Rostedt944ac422008-10-23 19:26:08 -0400100/*
101 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
102 *
103 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
104 * is set, then ftrace_dump is called. This will output the contents
105 * of the ftrace buffers to the console. This is very useful for
106 * capturing traces that lead to crashes and outputing it to a
107 * serial console.
108 *
109 * It is default off, but you can enable it with either specifying
110 * "ftrace_dump_on_oops" in the kernel command line, or setting
Frederic Weisbeckercecbca92010-04-18 19:08:41 +0200111 * /proc/sys/kernel/ftrace_dump_on_oops
112 * Set 1 if you want to dump buffers of all CPUs
113 * Set 2 if you want to dump the buffer of the CPU that triggered oops
Steven Rostedt944ac422008-10-23 19:26:08 -0400114 */
Frederic Weisbeckercecbca92010-04-18 19:08:41 +0200115
116enum ftrace_dump_mode ftrace_dump_on_oops;
Steven Rostedt944ac422008-10-23 19:26:08 -0400117
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400118/* When set, tracing will stop when a WARN*() is hit */
119int __disable_trace_on_warning;
120
Steven Rostedtb2821ae2009-02-02 21:38:32 -0500121static int tracing_set_tracer(const char *buf);
122
Li Zefanee6c2c12009-09-18 14:06:47 +0800123#define MAX_TRACER_SIZE 100
124static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
Steven Rostedtb2821ae2009-02-02 21:38:32 -0500125static char *default_bootup_tracer;
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100126
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500127static bool allocate_snapshot;
128
Frederic Weisbecker1beee962009-10-14 20:50:32 +0200129static int __init set_cmdline_ftrace(char *str)
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100130{
Chen Gang67012ab2013-04-08 12:06:44 +0800131 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
Steven Rostedtb2821ae2009-02-02 21:38:32 -0500132 default_bootup_tracer = bootup_tracer_buf;
Steven Rostedt73c51622009-03-11 13:42:01 -0400133 /* We are using ftrace early, expand it */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500134 ring_buffer_expanded = true;
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100135 return 1;
136}
Frederic Weisbecker1beee962009-10-14 20:50:32 +0200137__setup("ftrace=", set_cmdline_ftrace);
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100138
Steven Rostedt944ac422008-10-23 19:26:08 -0400139static int __init set_ftrace_dump_on_oops(char *str)
140{
Frederic Weisbeckercecbca92010-04-18 19:08:41 +0200141 if (*str++ != '=' || !*str) {
142 ftrace_dump_on_oops = DUMP_ALL;
143 return 1;
144 }
145
146 if (!strcmp("orig_cpu", str)) {
147 ftrace_dump_on_oops = DUMP_ORIG;
148 return 1;
149 }
150
151 return 0;
Steven Rostedt944ac422008-10-23 19:26:08 -0400152}
153__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
Steven Rostedt60a11772008-05-12 21:20:44 +0200154
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400155static int __init stop_trace_on_warning(char *str)
156{
157 __disable_trace_on_warning = 1;
158 return 1;
159}
160__setup("traceoff_on_warning=", stop_trace_on_warning);
161
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -0400162static int __init boot_alloc_snapshot(char *str)
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500163{
164 allocate_snapshot = true;
165 /* We also need the main ring buffer expanded */
166 ring_buffer_expanded = true;
167 return 1;
168}
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -0400169__setup("alloc_snapshot", boot_alloc_snapshot);
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500170
Steven Rostedt7bcfaf52012-11-01 22:56:07 -0400171
172static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
173static char *trace_boot_options __initdata;
174
175static int __init set_trace_boot_options(char *str)
176{
Chen Gang67012ab2013-04-08 12:06:44 +0800177 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
Steven Rostedt7bcfaf52012-11-01 22:56:07 -0400178 trace_boot_options = trace_boot_options_buf;
179 return 0;
180}
181__setup("trace_options=", set_trace_boot_options);
182
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400183
Lai Jiangshancf8e3472009-03-30 13:48:00 +0800184unsigned long long ns2usecs(cycle_t nsec)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200185{
186 nsec += 500;
187 do_div(nsec, 1000);
188 return nsec;
189}
190
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200191/*
192 * The global_trace is the descriptor that holds the tracing
193 * buffers for the live tracing. For each CPU, it contains
194 * a link list of pages that will store trace entries. The
195 * page descriptor of the pages in the memory is used to hold
196 * the link list by linking the lru item in the page descriptor
197 * to each of the pages in the buffer per CPU.
198 *
199 * For each active CPU there is a data field that holds the
200 * pages for the buffer for that CPU. Each CPU has the same number
201 * of pages allocated for its buffer.
202 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200203static struct trace_array global_trace;
204
Steven Rostedtae63b312012-05-03 23:09:03 -0400205LIST_HEAD(ftrace_trace_arrays);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200206
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -0400207int trace_array_get(struct trace_array *this_tr)
208{
209 struct trace_array *tr;
210 int ret = -ENODEV;
211
212 mutex_lock(&trace_types_lock);
213 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
214 if (tr == this_tr) {
215 tr->ref++;
216 ret = 0;
217 break;
218 }
219 }
220 mutex_unlock(&trace_types_lock);
221
222 return ret;
223}
224
225static void __trace_array_put(struct trace_array *this_tr)
226{
227 WARN_ON(!this_tr->ref);
228 this_tr->ref--;
229}
230
231void trace_array_put(struct trace_array *this_tr)
232{
233 mutex_lock(&trace_types_lock);
234 __trace_array_put(this_tr);
235 mutex_unlock(&trace_types_lock);
236}
237
Tom Zanussif306cc82013-10-24 08:34:17 -0500238int filter_check_discard(struct ftrace_event_file *file, void *rec,
239 struct ring_buffer *buffer,
240 struct ring_buffer_event *event)
Tom Zanussieb02ce02009-04-08 03:15:54 -0500241{
Tom Zanussif306cc82013-10-24 08:34:17 -0500242 if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
243 !filter_match_preds(file->filter, rec)) {
244 ring_buffer_discard_commit(buffer, event);
245 return 1;
246 }
247
248 return 0;
Tom Zanussieb02ce02009-04-08 03:15:54 -0500249}
Tom Zanussif306cc82013-10-24 08:34:17 -0500250EXPORT_SYMBOL_GPL(filter_check_discard);
251
252int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
253 struct ring_buffer *buffer,
254 struct ring_buffer_event *event)
255{
256 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
257 !filter_match_preds(call->filter, rec)) {
258 ring_buffer_discard_commit(buffer, event);
259 return 1;
260 }
261
262 return 0;
263}
264EXPORT_SYMBOL_GPL(call_filter_check_discard);
Tom Zanussieb02ce02009-04-08 03:15:54 -0500265
Alexander Z Lam94571582013-08-02 18:36:16 -0700266cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
Steven Rostedt37886f62009-03-17 17:22:06 -0400267{
268 u64 ts;
269
270 /* Early boot up does not have a buffer yet */
Alexander Z Lam94571582013-08-02 18:36:16 -0700271 if (!buf->buffer)
Steven Rostedt37886f62009-03-17 17:22:06 -0400272 return trace_clock_local();
273
Alexander Z Lam94571582013-08-02 18:36:16 -0700274 ts = ring_buffer_time_stamp(buf->buffer, cpu);
275 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
Steven Rostedt37886f62009-03-17 17:22:06 -0400276
277 return ts;
278}
279
Alexander Z Lam94571582013-08-02 18:36:16 -0700280cycle_t ftrace_now(int cpu)
281{
282 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
283}
284
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400285/**
286 * tracing_is_enabled - Show if global_trace has been disabled
287 *
288 * Shows if the global trace has been enabled or not. It uses the
289 * mirror flag "buffer_disabled" to be used in fast paths such as for
290 * the irqsoff tracer. But it may be inaccurate due to races. If you
291 * need to know the accurate state, use tracing_is_on() which is a little
292 * slower, but accurate.
293 */
Steven Rostedt90369902008-11-05 16:05:44 -0500294int tracing_is_enabled(void)
295{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400296 /*
297 * For quick access (irqsoff uses this in fast path), just
298 * return the mirror variable of the state of the ring buffer.
299 * It's a little racy, but we don't really care.
300 */
301 smp_rmb();
302 return !global_trace.buffer_disabled;
Steven Rostedt90369902008-11-05 16:05:44 -0500303}
304
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200305/*
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400306 * trace_buf_size is the size in bytes that is allocated
307 * for a buffer. Note, the number of bytes is always rounded
308 * to page size.
Steven Rostedt3f5a54e2008-07-30 22:36:46 -0400309 *
310 * This number is purposely set to a low number of 16384.
311 * If the dump on oops happens, it will be much appreciated
312 * to not have to wait for all that output. Anyway this can be
313 * boot time and run time configurable.
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200314 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400315#define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
Steven Rostedt3f5a54e2008-07-30 22:36:46 -0400316
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400317static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200318
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200319/* trace_types holds a link list of available tracers. */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200320static struct tracer *trace_types __read_mostly;
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200321
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200322/*
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200323 * trace_types_lock is used to protect the trace_types list.
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200324 */
Alexander Z Lama8227412013-07-01 19:37:54 -0700325DEFINE_MUTEX(trace_types_lock);
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200326
Lai Jiangshan7e53bd42010-01-06 20:08:50 +0800327/*
328 * serialize the access of the ring buffer
329 *
330 * ring buffer serializes readers, but it is low level protection.
331 * The validity of the events (which returns by ring_buffer_peek() ..etc)
332 * are not protected by ring buffer.
333 *
334 * The content of events may become garbage if we allow other process consumes
335 * these events concurrently:
336 * A) the page of the consumed events may become a normal page
337 * (not reader page) in ring buffer, and this page will be rewrited
338 * by events producer.
339 * B) The page of the consumed events may become a page for splice_read,
340 * and this page will be returned to system.
341 *
342 * These primitives allow multi process access to different cpu ring buffer
343 * concurrently.
344 *
345 * These primitives don't distinguish read-only and read-consume access.
346 * Multi read-only access are also serialized.
347 */
348
349#ifdef CONFIG_SMP
350static DECLARE_RWSEM(all_cpu_access_lock);
351static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
352
353static inline void trace_access_lock(int cpu)
354{
Steven Rostedtae3b5092013-01-23 15:22:59 -0500355 if (cpu == RING_BUFFER_ALL_CPUS) {
Lai Jiangshan7e53bd42010-01-06 20:08:50 +0800356 /* gain it for accessing the whole ring buffer. */
357 down_write(&all_cpu_access_lock);
358 } else {
359 /* gain it for accessing a cpu ring buffer. */
360
Steven Rostedtae3b5092013-01-23 15:22:59 -0500361 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
Lai Jiangshan7e53bd42010-01-06 20:08:50 +0800362 down_read(&all_cpu_access_lock);
363
364 /* Secondly block other access to this @cpu ring buffer. */
365 mutex_lock(&per_cpu(cpu_access_lock, cpu));
366 }
367}
368
369static inline void trace_access_unlock(int cpu)
370{
Steven Rostedtae3b5092013-01-23 15:22:59 -0500371 if (cpu == RING_BUFFER_ALL_CPUS) {
Lai Jiangshan7e53bd42010-01-06 20:08:50 +0800372 up_write(&all_cpu_access_lock);
373 } else {
374 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
375 up_read(&all_cpu_access_lock);
376 }
377}
378
379static inline void trace_access_lock_init(void)
380{
381 int cpu;
382
383 for_each_possible_cpu(cpu)
384 mutex_init(&per_cpu(cpu_access_lock, cpu));
385}
386
387#else
388
389static DEFINE_MUTEX(access_lock);
390
391static inline void trace_access_lock(int cpu)
392{
393 (void)cpu;
394 mutex_lock(&access_lock);
395}
396
397static inline void trace_access_unlock(int cpu)
398{
399 (void)cpu;
400 mutex_unlock(&access_lock);
401}
402
403static inline void trace_access_lock_init(void)
404{
405}
406
407#endif
408
Steven Rostedtee6bce52008-11-12 17:52:37 -0500409/* trace_flags holds trace_options default values */
Steven Rostedt12ef7d42008-11-12 17:52:38 -0500410unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
Steven Rostedta2a16d62009-03-24 23:17:58 -0400411 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
Steven Rostedt77271ce2011-11-17 09:34:33 -0500412 TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
Steven Rostedt (Red Hat)328df472013-03-14 12:10:40 -0400413 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;
Vaibhav Nagarnaike7e2ee82011-05-10 13:27:21 -0700414
Steven Rostedt (Red Hat)5280bce2013-07-02 19:59:57 -0400415static void tracer_tracing_on(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400416{
417 if (tr->trace_buffer.buffer)
418 ring_buffer_record_on(tr->trace_buffer.buffer);
419 /*
420 * This flag is looked at when buffers haven't been allocated
421 * yet, or by some tracers (like irqsoff), that just want to
422 * know if the ring buffer has been disabled, but it can handle
423 * races of where it gets disabled but we still do a record.
424 * As the check is in the fast path of the tracers, it is more
425 * important to be fast than accurate.
426 */
427 tr->buffer_disabled = 0;
428 /* Make the flag seen by readers */
429 smp_wmb();
430}
431
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200432/**
Steven Rostedt499e5472012-02-22 15:50:28 -0500433 * tracing_on - enable tracing buffers
434 *
435 * This function enables tracing buffers that may have been
436 * disabled with tracing_off.
437 */
438void tracing_on(void)
439{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400440 tracer_tracing_on(&global_trace);
Steven Rostedt499e5472012-02-22 15:50:28 -0500441}
442EXPORT_SYMBOL_GPL(tracing_on);
443
444/**
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500445 * __trace_puts - write a constant string into the trace buffer.
446 * @ip: The address of the caller
447 * @str: The constant string to write
448 * @size: The size of the string.
449 */
450int __trace_puts(unsigned long ip, const char *str, int size)
451{
452 struct ring_buffer_event *event;
453 struct ring_buffer *buffer;
454 struct print_entry *entry;
455 unsigned long irq_flags;
456 int alloc;
457
Steven Rostedt (Red Hat)3132e102014-01-23 12:27:59 -0500458 if (unlikely(tracing_selftest_running || tracing_disabled))
459 return 0;
460
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500461 alloc = sizeof(*entry) + size + 2; /* possible \n added */
462
463 local_save_flags(irq_flags);
464 buffer = global_trace.trace_buffer.buffer;
465 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
466 irq_flags, preempt_count());
467 if (!event)
468 return 0;
469
470 entry = ring_buffer_event_data(event);
471 entry->ip = ip;
472
473 memcpy(&entry->buf, str, size);
474
475 /* Add a newline if necessary */
476 if (entry->buf[size - 1] != '\n') {
477 entry->buf[size] = '\n';
478 entry->buf[size + 1] = '\0';
479 } else
480 entry->buf[size] = '\0';
481
482 __buffer_unlock_commit(buffer, event);
483
484 return size;
485}
486EXPORT_SYMBOL_GPL(__trace_puts);
487
488/**
489 * __trace_bputs - write the pointer to a constant string into trace buffer
490 * @ip: The address of the caller
491 * @str: The constant string to write to the buffer to
492 */
493int __trace_bputs(unsigned long ip, const char *str)
494{
495 struct ring_buffer_event *event;
496 struct ring_buffer *buffer;
497 struct bputs_entry *entry;
498 unsigned long irq_flags;
499 int size = sizeof(struct bputs_entry);
500
Steven Rostedt (Red Hat)3132e102014-01-23 12:27:59 -0500501 if (unlikely(tracing_selftest_running || tracing_disabled))
502 return 0;
503
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500504 local_save_flags(irq_flags);
505 buffer = global_trace.trace_buffer.buffer;
506 event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
507 irq_flags, preempt_count());
508 if (!event)
509 return 0;
510
511 entry = ring_buffer_event_data(event);
512 entry->ip = ip;
513 entry->str = str;
514
515 __buffer_unlock_commit(buffer, event);
516
517 return 1;
518}
519EXPORT_SYMBOL_GPL(__trace_bputs);
520
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500521#ifdef CONFIG_TRACER_SNAPSHOT
522/**
523 * trace_snapshot - take a snapshot of the current buffer.
524 *
525 * This causes a swap between the snapshot buffer and the current live
526 * tracing buffer. You can use this to take snapshots of the live
527 * trace when some condition is triggered, but continue to trace.
528 *
529 * Note, make sure to allocate the snapshot with either
530 * a tracing_snapshot_alloc(), or by doing it manually
531 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
532 *
533 * If the snapshot buffer is not allocated, it will stop tracing.
534 * Basically making a permanent snapshot.
535 */
536void tracing_snapshot(void)
537{
538 struct trace_array *tr = &global_trace;
539 struct tracer *tracer = tr->current_trace;
540 unsigned long flags;
541
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -0500542 if (in_nmi()) {
543 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
544 internal_trace_puts("*** snapshot is being ignored ***\n");
545 return;
546 }
547
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500548 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)ca268da2013-03-09 00:40:58 -0500549 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
550 internal_trace_puts("*** stopping trace here! ***\n");
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500551 tracing_off();
552 return;
553 }
554
555 /* Note, snapshot can not be used when the tracer uses it */
556 if (tracer->use_max_tr) {
Steven Rostedt (Red Hat)ca268da2013-03-09 00:40:58 -0500557 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
558 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500559 return;
560 }
561
562 local_irq_save(flags);
563 update_max_tr(tr, current, smp_processor_id());
564 local_irq_restore(flags);
565}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -0500566EXPORT_SYMBOL_GPL(tracing_snapshot);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500567
568static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
569 struct trace_buffer *size_buf, int cpu_id);
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -0400570static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
571
572static int alloc_snapshot(struct trace_array *tr)
573{
574 int ret;
575
576 if (!tr->allocated_snapshot) {
577
578 /* allocate spare buffer */
579 ret = resize_buffer_duplicate_size(&tr->max_buffer,
580 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
581 if (ret < 0)
582 return ret;
583
584 tr->allocated_snapshot = true;
585 }
586
587 return 0;
588}
589
590void free_snapshot(struct trace_array *tr)
591{
592 /*
593 * We don't free the ring buffer. instead, resize it because
594 * The max_tr ring buffer has some state (e.g. ring->clock) and
595 * we want preserve it.
596 */
597 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
598 set_buffer_entries(&tr->max_buffer, 1);
599 tracing_reset_online_cpus(&tr->max_buffer);
600 tr->allocated_snapshot = false;
601}
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500602
603/**
Tom Zanussi93e31ff2013-10-24 08:59:26 -0500604 * tracing_alloc_snapshot - allocate snapshot buffer.
605 *
606 * This only allocates the snapshot buffer if it isn't already
607 * allocated - it doesn't also take a snapshot.
608 *
609 * This is meant to be used in cases where the snapshot buffer needs
610 * to be set up for events that can't sleep but need to be able to
611 * trigger a snapshot.
612 */
613int tracing_alloc_snapshot(void)
614{
615 struct trace_array *tr = &global_trace;
616 int ret;
617
618 ret = alloc_snapshot(tr);
619 WARN_ON(ret < 0);
620
621 return ret;
622}
623EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
624
625/**
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500626 * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
627 *
628 * This is similar to trace_snapshot(), but it will allocate the
629 * snapshot buffer if it isn't already allocated. Use this only
630 * where it is safe to sleep, as the allocation may sleep.
631 *
632 * This causes a swap between the snapshot buffer and the current live
633 * tracing buffer. You can use this to take snapshots of the live
634 * trace when some condition is triggered, but continue to trace.
635 */
636void tracing_snapshot_alloc(void)
637{
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500638 int ret;
639
Tom Zanussi93e31ff2013-10-24 08:59:26 -0500640 ret = tracing_alloc_snapshot();
641 if (ret < 0)
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -0400642 return;
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500643
644 tracing_snapshot();
645}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -0500646EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500647#else
648void tracing_snapshot(void)
649{
650 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
651}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -0500652EXPORT_SYMBOL_GPL(tracing_snapshot);
Tom Zanussi93e31ff2013-10-24 08:59:26 -0500653int tracing_alloc_snapshot(void)
654{
655 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
656 return -ENODEV;
657}
658EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500659void tracing_snapshot_alloc(void)
660{
661 /* Give warning */
662 tracing_snapshot();
663}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -0500664EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500665#endif /* CONFIG_TRACER_SNAPSHOT */
666
Steven Rostedt (Red Hat)5280bce2013-07-02 19:59:57 -0400667static void tracer_tracing_off(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400668{
669 if (tr->trace_buffer.buffer)
670 ring_buffer_record_off(tr->trace_buffer.buffer);
671 /*
672 * This flag is looked at when buffers haven't been allocated
673 * yet, or by some tracers (like irqsoff), that just want to
674 * know if the ring buffer has been disabled, but it can handle
675 * races of where it gets disabled but we still do a record.
676 * As the check is in the fast path of the tracers, it is more
677 * important to be fast than accurate.
678 */
679 tr->buffer_disabled = 1;
680 /* Make the flag seen by readers */
681 smp_wmb();
682}
683
Steven Rostedt499e5472012-02-22 15:50:28 -0500684/**
685 * tracing_off - turn off tracing buffers
686 *
687 * This function stops the tracing buffers from recording data.
688 * It does not disable any overhead the tracers themselves may
689 * be causing. This function simply causes all recording to
690 * the ring buffers to fail.
691 */
692void tracing_off(void)
693{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400694 tracer_tracing_off(&global_trace);
Steven Rostedt499e5472012-02-22 15:50:28 -0500695}
696EXPORT_SYMBOL_GPL(tracing_off);
697
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400698void disable_trace_on_warning(void)
699{
700 if (__disable_trace_on_warning)
701 tracing_off();
702}
703
Steven Rostedt499e5472012-02-22 15:50:28 -0500704/**
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400705 * tracer_tracing_is_on - show real state of ring buffer enabled
706 * @tr : the trace array to know if ring buffer is enabled
707 *
708 * Shows real state of the ring buffer if it is enabled or not.
709 */
Steven Rostedt (Red Hat)5280bce2013-07-02 19:59:57 -0400710static int tracer_tracing_is_on(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400711{
712 if (tr->trace_buffer.buffer)
713 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
714 return !tr->buffer_disabled;
715}
716
Steven Rostedt499e5472012-02-22 15:50:28 -0500717/**
718 * tracing_is_on - show state of ring buffers enabled
719 */
720int tracing_is_on(void)
721{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400722 return tracer_tracing_is_on(&global_trace);
Steven Rostedt499e5472012-02-22 15:50:28 -0500723}
724EXPORT_SYMBOL_GPL(tracing_is_on);
725
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400726static int __init set_buf_size(char *str)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200727{
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400728 unsigned long buf_size;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +0200729
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200730 if (!str)
731 return 0;
Li Zefan9d612be2009-06-24 17:33:15 +0800732 buf_size = memparse(str, &str);
Steven Rostedtc6caeeb2008-05-12 21:21:00 +0200733 /* nr_entries can not be zero */
Li Zefan9d612be2009-06-24 17:33:15 +0800734 if (buf_size == 0)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +0200735 return 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400736 trace_buf_size = buf_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200737 return 1;
738}
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400739__setup("trace_buf_size=", set_buf_size);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200740
Tim Bird0e950172010-02-25 15:36:43 -0800741static int __init set_tracing_thresh(char *str)
742{
Wang Tianhong87abb3b2012-08-02 14:02:00 +0800743 unsigned long threshold;
Tim Bird0e950172010-02-25 15:36:43 -0800744 int ret;
745
746 if (!str)
747 return 0;
Daniel Walterbcd83ea2012-09-26 22:08:38 +0200748 ret = kstrtoul(str, 0, &threshold);
Tim Bird0e950172010-02-25 15:36:43 -0800749 if (ret < 0)
750 return 0;
Wang Tianhong87abb3b2012-08-02 14:02:00 +0800751 tracing_thresh = threshold * 1000;
Tim Bird0e950172010-02-25 15:36:43 -0800752 return 1;
753}
754__setup("tracing_thresh=", set_tracing_thresh);
755
Steven Rostedt57f50be2008-05-12 21:20:44 +0200756unsigned long nsecs_to_usecs(unsigned long nsecs)
757{
758 return nsecs / 1000;
759}
760
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200761/* These must match the bit postions in trace_iterator_flags */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200762static const char *trace_options[] = {
763 "print-parent",
764 "sym-offset",
765 "sym-addr",
766 "verbose",
Ingo Molnarf9896bf2008-05-12 21:20:47 +0200767 "raw",
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +0200768 "hex",
Ingo Molnarcb0f12a2008-05-12 21:20:47 +0200769 "bin",
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +0200770 "block",
Ingo Molnar86387f72008-05-12 21:20:51 +0200771 "stacktrace",
Ingo Molnar5e1607a2009-03-05 10:24:48 +0100772 "trace_printk",
Steven Rostedtb2a866f2008-11-03 23:15:57 -0500773 "ftrace_preempt",
Steven Rostedt9f029e82008-11-12 15:24:24 -0500774 "branch",
Steven Rostedt12ef7d42008-11-12 17:52:38 -0500775 "annotate",
Török Edwin02b67512008-11-22 13:28:47 +0200776 "userstacktrace",
Török Edwinb54d3de2008-11-22 13:28:48 +0200777 "sym-userobj",
Frederic Weisbecker66896a82008-12-13 20:18:13 +0100778 "printk-msg-only",
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -0200779 "context-info",
Steven Rostedtc032ef642009-03-04 20:34:24 -0500780 "latency-format",
Steven Rostedtbe6f1642009-03-24 11:06:24 -0400781 "sleep-time",
Steven Rostedta2a16d62009-03-24 23:17:58 -0400782 "graph-time",
Li Zefane870e9a2010-07-02 11:07:32 +0800783 "record-cmd",
David Sharp750912f2010-12-08 13:46:47 -0800784 "overwrite",
Steven Rostedtcf30cf62011-06-14 22:44:07 -0400785 "disable_on_free",
Steven Rostedt77271ce2011-11-17 09:34:33 -0500786 "irq-info",
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -0700787 "markers",
Steven Rostedt (Red Hat)328df472013-03-14 12:10:40 -0400788 "function-trace",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200789 NULL
790};
791
Zhaolei5079f322009-08-25 16:12:56 +0800792static struct {
793 u64 (*func)(void);
794 const char *name;
David Sharp8be07092012-11-13 12:18:22 -0800795 int in_ns; /* is this clock in nanoseconds? */
Zhaolei5079f322009-08-25 16:12:56 +0800796} trace_clocks[] = {
David Sharp8be07092012-11-13 12:18:22 -0800797 { trace_clock_local, "local", 1 },
798 { trace_clock_global, "global", 1 },
799 { trace_clock_counter, "counter", 0 },
Steven Rostedt (Red Hat)8aacf012013-03-14 13:13:45 -0400800 { trace_clock_jiffies, "uptime", 1 },
Steven Rostedt (Red Hat)76f11912013-03-14 17:53:25 -0400801 { trace_clock, "perf", 1 },
David Sharp8cbd9cc2012-11-13 12:18:21 -0800802 ARCH_TRACE_CLOCKS
Zhaolei5079f322009-08-25 16:12:56 +0800803};
804
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +0200805/*
806 * trace_parser_get_init - gets the buffer for trace parser
807 */
808int trace_parser_get_init(struct trace_parser *parser, int size)
809{
810 memset(parser, 0, sizeof(*parser));
811
812 parser->buffer = kmalloc(size, GFP_KERNEL);
813 if (!parser->buffer)
814 return 1;
815
816 parser->size = size;
817 return 0;
818}
819
820/*
821 * trace_parser_put - frees the buffer for trace parser
822 */
823void trace_parser_put(struct trace_parser *parser)
824{
825 kfree(parser->buffer);
826}
827
828/*
829 * trace_get_user - reads the user input string separated by space
830 * (matched by isspace(ch))
831 *
832 * For each string found the 'struct trace_parser' is updated,
833 * and the function returns.
834 *
835 * Returns number of bytes read.
836 *
837 * See kernel/trace/trace.h for 'struct trace_parser' details.
838 */
839int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
840 size_t cnt, loff_t *ppos)
841{
842 char ch;
843 size_t read = 0;
844 ssize_t ret;
845
846 if (!*ppos)
847 trace_parser_clear(parser);
848
849 ret = get_user(ch, ubuf++);
850 if (ret)
851 goto out;
852
853 read++;
854 cnt--;
855
856 /*
857 * The parser is not finished with the last write,
858 * continue reading the user input without skipping spaces.
859 */
860 if (!parser->cont) {
861 /* skip white space */
862 while (cnt && isspace(ch)) {
863 ret = get_user(ch, ubuf++);
864 if (ret)
865 goto out;
866 read++;
867 cnt--;
868 }
869
870 /* only spaces were written */
871 if (isspace(ch)) {
872 *ppos += read;
873 ret = read;
874 goto out;
875 }
876
877 parser->idx = 0;
878 }
879
880 /* read the non-space input */
881 while (cnt && !isspace(ch)) {
Li Zefan3c235a32009-09-22 13:51:54 +0800882 if (parser->idx < parser->size - 1)
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +0200883 parser->buffer[parser->idx++] = ch;
884 else {
885 ret = -EINVAL;
886 goto out;
887 }
888 ret = get_user(ch, ubuf++);
889 if (ret)
890 goto out;
891 read++;
892 cnt--;
893 }
894
895 /* We either got finished input or we have to wait for another call. */
896 if (isspace(ch)) {
897 parser->buffer[parser->idx] = 0;
898 parser->cont = false;
Steven Rostedt057db842013-10-09 22:23:23 -0400899 } else if (parser->idx < parser->size - 1) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +0200900 parser->cont = true;
901 parser->buffer[parser->idx++] = ch;
Steven Rostedt057db842013-10-09 22:23:23 -0400902 } else {
903 ret = -EINVAL;
904 goto out;
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +0200905 }
906
907 *ppos += read;
908 ret = read;
909
910out:
911 return ret;
912}
913
Pekka Paalanen6c6c2792008-05-12 21:21:02 +0200914ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
915{
916 int len;
917 int ret;
918
Steven Rostedt2dc5d122009-03-04 19:10:05 -0500919 if (!cnt)
920 return 0;
921
Pekka Paalanen6c6c2792008-05-12 21:21:02 +0200922 if (s->len <= s->readpos)
923 return -EBUSY;
924
925 len = s->len - s->readpos;
926 if (cnt > len)
927 cnt = len;
928 ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
Steven Rostedt2dc5d122009-03-04 19:10:05 -0500929 if (ret == cnt)
Pekka Paalanen6c6c2792008-05-12 21:21:02 +0200930 return -EFAULT;
931
Steven Rostedt2dc5d122009-03-04 19:10:05 -0500932 cnt -= ret;
933
Steven Rostedte74da522009-03-04 20:31:11 -0500934 s->readpos += cnt;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +0200935 return cnt;
Steven Rostedt214023c2008-05-12 21:20:46 +0200936}
937
Dmitri Vorobievb8b94262009-03-22 19:11:11 +0200938static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +0200939{
940 int len;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +0200941
942 if (s->len <= s->readpos)
943 return -EBUSY;
944
945 len = s->len - s->readpos;
946 if (cnt > len)
947 cnt = len;
Dan Carpenter5a26c8f2012-04-20 09:31:45 +0300948 memcpy(buf, s->buffer + s->readpos, cnt);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +0200949
Steven Rostedte74da522009-03-04 20:31:11 -0500950 s->readpos += cnt;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +0200951 return cnt;
952}
953
Steven Rostedt5d4a9db2009-08-27 16:52:21 -0400954/*
955 * ftrace_max_lock is used to protect the swapping of buffers
956 * when taking a max snapshot. The buffers themselves are
957 * protected by per_cpu spinlocks. But the action of the swap
958 * needs its own lock.
959 *
Thomas Gleixner445c8952009-12-02 19:49:50 +0100960 * This is defined as a arch_spinlock_t in order to help
Steven Rostedt5d4a9db2009-08-27 16:52:21 -0400961 * with performance when lockdep debugging is enabled.
962 *
963 * It is also used in other places outside the update_max_tr
964 * so it needs to be defined outside of the
965 * CONFIG_TRACER_MAX_TRACE.
966 */
Thomas Gleixner445c8952009-12-02 19:49:50 +0100967static arch_spinlock_t ftrace_max_lock =
Thomas Gleixneredc35bd2009-12-03 12:38:57 +0100968 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -0400969
Tim Bird0e950172010-02-25 15:36:43 -0800970unsigned long __read_mostly tracing_thresh;
971
Steven Rostedt5d4a9db2009-08-27 16:52:21 -0400972#ifdef CONFIG_TRACER_MAX_TRACE
973unsigned long __read_mostly tracing_max_latency;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -0400974
975/*
976 * Copy the new maximum trace into the separate maximum-trace
977 * structure. (this way the maximum trace is permanently saved,
978 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
979 */
980static void
981__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
982{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -0500983 struct trace_buffer *trace_buf = &tr->trace_buffer;
984 struct trace_buffer *max_buf = &tr->max_buffer;
985 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
986 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
Steven Rostedt5d4a9db2009-08-27 16:52:21 -0400987
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -0500988 max_buf->cpu = cpu;
989 max_buf->time_start = data->preempt_timestamp;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -0400990
Steven Rostedt8248ac02009-09-02 12:27:41 -0400991 max_data->saved_latency = tracing_max_latency;
992 max_data->critical_start = data->critical_start;
993 max_data->critical_end = data->critical_end;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -0400994
Arnaldo Carvalho de Melo1acaa1b2010-03-05 18:23:50 -0300995 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
Steven Rostedt8248ac02009-09-02 12:27:41 -0400996 max_data->pid = tsk->pid;
Steven Rostedt (Red Hat)f17a5192013-05-30 21:10:37 -0400997 /*
998 * If tsk == current, then use current_uid(), as that does not use
999 * RCU. The irq tracer can be called out of RCU scope.
1000 */
1001 if (tsk == current)
1002 max_data->uid = current_uid();
1003 else
1004 max_data->uid = task_uid(tsk);
1005
Steven Rostedt8248ac02009-09-02 12:27:41 -04001006 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1007 max_data->policy = tsk->policy;
1008 max_data->rt_priority = tsk->rt_priority;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001009
1010 /* record this tasks comm */
1011 tracing_record_cmdline(tsk);
1012}
1013
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001014/**
1015 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1016 * @tr: tracer
1017 * @tsk: the task with the latency
1018 * @cpu: The cpu that initiated the trace.
1019 *
1020 * Flip the buffers between the @tr and the max_tr and record information
1021 * about which task was the cause of this latency.
1022 */
Ingo Molnare309b412008-05-12 21:20:51 +02001023void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001024update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1025{
Steven Rostedt (Red Hat)2721e722013-03-12 11:32:32 -04001026 struct ring_buffer *buf;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001027
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001028 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001029 return;
1030
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001031 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt34600f02013-01-22 13:35:11 -05001032
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05001033 if (!tr->allocated_snapshot) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09001034 /* Only the nop tracer should hit this when disabling */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001035 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001036 return;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09001037 }
Steven Rostedt34600f02013-01-22 13:35:11 -05001038
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001039 arch_spin_lock(&ftrace_max_lock);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001040
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001041 buf = tr->trace_buffer.buffer;
1042 tr->trace_buffer.buffer = tr->max_buffer.buffer;
1043 tr->max_buffer.buffer = buf;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001044
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001045 __update_max_tr(tr, tsk, cpu);
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001046 arch_spin_unlock(&ftrace_max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001047}
1048
1049/**
1050 * update_max_tr_single - only copy one trace over, and reset the rest
1051 * @tr - tracer
1052 * @tsk - task with the latency
1053 * @cpu - the cpu of the buffer to copy.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001054 *
1055 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001056 */
Ingo Molnare309b412008-05-12 21:20:51 +02001057void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001058update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1059{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001060 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001061
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001062 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001063 return;
1064
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001065 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt6c244992013-04-29 20:08:14 -04001066 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001067 /* Only the nop tracer should hit this when disabling */
Linus Torvalds9e8529a2013-04-29 13:55:38 -07001068 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001069 return;
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001070 }
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001071
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001072 arch_spin_lock(&ftrace_max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001073
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001074 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001075
Steven Rostedte8165db2009-09-03 19:13:05 -04001076 if (ret == -EBUSY) {
1077 /*
1078 * We failed to swap the buffer due to a commit taking
1079 * place on this CPU. We fail to record, but we reset
1080 * the max trace buffer (no one writes directly to it)
1081 * and flag that it failed.
1082 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001083 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
Steven Rostedte8165db2009-09-03 19:13:05 -04001084 "Failed to swap buffers due to commit in progress\n");
1085 }
1086
Steven Rostedte8165db2009-09-03 19:13:05 -04001087 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001088
1089 __update_max_tr(tr, tsk, cpu);
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001090 arch_spin_unlock(&ftrace_max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001091}
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001092#endif /* CONFIG_TRACER_MAX_TRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001093
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001094static void default_wait_pipe(struct trace_iterator *iter)
1095{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05001096 /* Iterators are static, they should be filled or empty */
1097 if (trace_buffer_iter(iter, iter->cpu_file))
1098 return;
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001099
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001100 ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001101}
1102
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001103#ifdef CONFIG_FTRACE_STARTUP_TEST
1104static int run_tracer_selftest(struct tracer *type)
1105{
1106 struct trace_array *tr = &global_trace;
1107 struct tracer *saved_tracer = tr->current_trace;
1108 int ret;
1109
1110 if (!type->selftest || tracing_selftest_disabled)
1111 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001112
1113 /*
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001114 * Run a selftest on this tracer.
1115 * Here we reset the trace buffer, and set the current
1116 * tracer to be this tracer. The tracer can then run some
1117 * internal tracing to verify that everything is in order.
1118 * If we fail, we do not register this tracer.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001119 */
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001120 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001121
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001122 tr->current_trace = type;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001123
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001124#ifdef CONFIG_TRACER_MAX_TRACE
1125 if (type->use_max_tr) {
1126 /* If we expanded the buffers, make sure the max is expanded too */
1127 if (ring_buffer_expanded)
1128 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1129 RING_BUFFER_ALL_CPUS);
1130 tr->allocated_snapshot = true;
1131 }
1132#endif
1133
1134 /* the test is responsible for initializing and enabling */
1135 pr_info("Testing tracer %s: ", type->name);
1136 ret = type->selftest(type, tr);
1137 /* the test is responsible for resetting too */
1138 tr->current_trace = saved_tracer;
1139 if (ret) {
1140 printk(KERN_CONT "FAILED!\n");
1141 /* Add the warning after printing 'FAILED' */
1142 WARN_ON(1);
1143 return -1;
1144 }
1145 /* Only reset on passing, to avoid touching corrupted buffers */
1146 tracing_reset_online_cpus(&tr->trace_buffer);
1147
1148#ifdef CONFIG_TRACER_MAX_TRACE
1149 if (type->use_max_tr) {
1150 tr->allocated_snapshot = false;
1151
1152 /* Shrink the max buffer again */
1153 if (ring_buffer_expanded)
1154 ring_buffer_resize(tr->max_buffer.buffer, 1,
1155 RING_BUFFER_ALL_CPUS);
1156 }
1157#endif
1158
1159 printk(KERN_CONT "PASSED\n");
1160 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001161}
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001162#else
1163static inline int run_tracer_selftest(struct tracer *type)
1164{
1165 return 0;
1166}
1167#endif /* CONFIG_FTRACE_STARTUP_TEST */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001168
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001169/**
1170 * register_tracer - register a tracer with the ftrace system.
1171 * @type - the plugin for the tracer
1172 *
1173 * Register a new plugin tracer.
1174 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001175int register_tracer(struct tracer *type)
1176{
1177 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001178 int ret = 0;
1179
1180 if (!type->name) {
1181 pr_info("Tracer must have a name\n");
1182 return -1;
1183 }
1184
Dan Carpenter24a461d2010-07-10 12:06:44 +02001185 if (strlen(type->name) >= MAX_TRACER_SIZE) {
Li Zefanee6c2c12009-09-18 14:06:47 +08001186 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1187 return -1;
1188 }
1189
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001190 mutex_lock(&trace_types_lock);
Ingo Molnar86fa2f62008-11-19 10:00:15 +01001191
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001192 tracing_selftest_running = true;
1193
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001194 for (t = trace_types; t; t = t->next) {
1195 if (strcmp(type->name, t->name) == 0) {
1196 /* already found */
Li Zefanee6c2c12009-09-18 14:06:47 +08001197 pr_info("Tracer %s already registered\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001198 type->name);
1199 ret = -1;
1200 goto out;
1201 }
1202 }
1203
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01001204 if (!type->set_flag)
1205 type->set_flag = &dummy_set_flag;
1206 if (!type->flags)
1207 type->flags = &dummy_tracer_flags;
1208 else
1209 if (!type->flags->opts)
1210 type->flags->opts = dummy_tracer_opt;
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01001211 if (!type->wait_pipe)
1212 type->wait_pipe = default_wait_pipe;
1213
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001214 ret = run_tracer_selftest(type);
1215 if (ret < 0)
1216 goto out;
Steven Rostedt60a11772008-05-12 21:20:44 +02001217
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001218 type->next = trace_types;
1219 trace_types = type;
Steven Rostedt60a11772008-05-12 21:20:44 +02001220
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001221 out:
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001222 tracing_selftest_running = false;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001223 mutex_unlock(&trace_types_lock);
1224
Steven Rostedtdac74942009-02-05 01:13:38 -05001225 if (ret || !default_bootup_tracer)
1226 goto out_unlock;
Steven Rostedtb2821ae2009-02-02 21:38:32 -05001227
Li Zefanee6c2c12009-09-18 14:06:47 +08001228 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
Steven Rostedtdac74942009-02-05 01:13:38 -05001229 goto out_unlock;
1230
1231 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1232 /* Do we want this tracer to start on bootup? */
1233 tracing_set_tracer(type->name);
1234 default_bootup_tracer = NULL;
1235 /* disable other selftests, since this will break it. */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05001236 tracing_selftest_disabled = true;
Steven Rostedtdac74942009-02-05 01:13:38 -05001237#ifdef CONFIG_FTRACE_STARTUP_TEST
1238 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1239 type->name);
1240#endif
1241
1242 out_unlock:
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001243 return ret;
1244}
1245
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001246void tracing_reset(struct trace_buffer *buf, int cpu)
Steven Rostedtf6339032009-09-04 12:35:16 -04001247{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001248 struct ring_buffer *buffer = buf->buffer;
Steven Rostedtf6339032009-09-04 12:35:16 -04001249
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001250 if (!buffer)
1251 return;
1252
Steven Rostedtf6339032009-09-04 12:35:16 -04001253 ring_buffer_record_disable(buffer);
1254
1255 /* Make sure all commits have finished */
1256 synchronize_sched();
Steven Rostedt68179682012-05-08 20:57:53 -04001257 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedtf6339032009-09-04 12:35:16 -04001258
1259 ring_buffer_record_enable(buffer);
1260}
1261
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001262void tracing_reset_online_cpus(struct trace_buffer *buf)
Pekka J Enberg213cc062008-12-19 12:08:39 +02001263{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001264 struct ring_buffer *buffer = buf->buffer;
Pekka J Enberg213cc062008-12-19 12:08:39 +02001265 int cpu;
1266
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001267 if (!buffer)
1268 return;
1269
Steven Rostedt621968c2009-09-04 12:02:35 -04001270 ring_buffer_record_disable(buffer);
1271
1272 /* Make sure all commits have finished */
1273 synchronize_sched();
1274
Alexander Z Lam94571582013-08-02 18:36:16 -07001275 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001276
1277 for_each_online_cpu(cpu)
Steven Rostedt68179682012-05-08 20:57:53 -04001278 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedt621968c2009-09-04 12:02:35 -04001279
1280 ring_buffer_record_enable(buffer);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001281}
1282
Steven Rostedt (Red Hat)09d80912013-07-23 22:21:59 -04001283/* Must have trace_types_lock held */
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001284void tracing_reset_all_online_cpus(void)
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001285{
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001286 struct trace_array *tr;
1287
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001288 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001289 tracing_reset_online_cpus(&tr->trace_buffer);
1290#ifdef CONFIG_TRACER_MAX_TRACE
1291 tracing_reset_online_cpus(&tr->max_buffer);
1292#endif
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001293 }
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001294}
1295
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001296#define SAVED_CMDLINES 128
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001297#define NO_CMDLINE_MAP UINT_MAX
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001298static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1299static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
1300static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
1301static int cmdline_idx;
Thomas Gleixneredc35bd2009-12-03 12:38:57 +01001302static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
Steven Rostedt25b0b442008-05-12 21:21:00 +02001303
Steven Rostedt25b0b442008-05-12 21:21:00 +02001304/* temporary disable recording */
Hannes Eder4fd27352009-02-10 19:44:12 +01001305static atomic_t trace_record_cmdline_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001306
1307static void trace_init_cmdlines(void)
1308{
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001309 memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
1310 memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001311 cmdline_idx = 0;
1312}
1313
Carsten Emdeb5130b12009-09-13 01:43:07 +02001314int is_tracing_stopped(void)
1315{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001316 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02001317}
1318
Steven Rostedt0f048702008-11-05 16:05:44 -05001319/**
1320 * tracing_start - quick start of the tracer
1321 *
1322 * If tracing is enabled but was stopped by tracing_stop,
1323 * this will start the tracer back up.
1324 */
1325void tracing_start(void)
1326{
1327 struct ring_buffer *buffer;
1328 unsigned long flags;
1329
1330 if (tracing_disabled)
1331 return;
1332
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001333 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1334 if (--global_trace.stop_count) {
1335 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05001336 /* Someone screwed up their debugging */
1337 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001338 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05001339 }
Steven Rostedt0f048702008-11-05 16:05:44 -05001340 goto out;
1341 }
1342
Steven Rostedta2f80712010-03-12 19:56:00 -05001343 /* Prevent the buffers from switching */
1344 arch_spin_lock(&ftrace_max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05001345
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001346 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001347 if (buffer)
1348 ring_buffer_record_enable(buffer);
1349
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001350#ifdef CONFIG_TRACER_MAX_TRACE
1351 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001352 if (buffer)
1353 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001354#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001355
Steven Rostedta2f80712010-03-12 19:56:00 -05001356 arch_spin_unlock(&ftrace_max_lock);
1357
Steven Rostedt0f048702008-11-05 16:05:44 -05001358 ftrace_start();
1359 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001360 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1361}
1362
1363static void tracing_start_tr(struct trace_array *tr)
1364{
1365 struct ring_buffer *buffer;
1366 unsigned long flags;
1367
1368 if (tracing_disabled)
1369 return;
1370
1371 /* If global, we need to also start the max tracer */
1372 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1373 return tracing_start();
1374
1375 raw_spin_lock_irqsave(&tr->start_lock, flags);
1376
1377 if (--tr->stop_count) {
1378 if (tr->stop_count < 0) {
1379 /* Someone screwed up their debugging */
1380 WARN_ON_ONCE(1);
1381 tr->stop_count = 0;
1382 }
1383 goto out;
1384 }
1385
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001386 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001387 if (buffer)
1388 ring_buffer_record_enable(buffer);
1389
1390 out:
1391 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001392}
1393
1394/**
1395 * tracing_stop - quick stop of the tracer
1396 *
1397 * Light weight way to stop tracing. Use in conjunction with
1398 * tracing_start.
1399 */
1400void tracing_stop(void)
1401{
1402 struct ring_buffer *buffer;
1403 unsigned long flags;
1404
1405 ftrace_stop();
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001406 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1407 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05001408 goto out;
1409
Steven Rostedta2f80712010-03-12 19:56:00 -05001410 /* Prevent the buffers from switching */
1411 arch_spin_lock(&ftrace_max_lock);
1412
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001413 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001414 if (buffer)
1415 ring_buffer_record_disable(buffer);
1416
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001417#ifdef CONFIG_TRACER_MAX_TRACE
1418 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001419 if (buffer)
1420 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001421#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001422
Steven Rostedta2f80712010-03-12 19:56:00 -05001423 arch_spin_unlock(&ftrace_max_lock);
1424
Steven Rostedt0f048702008-11-05 16:05:44 -05001425 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001426 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1427}
1428
1429static void tracing_stop_tr(struct trace_array *tr)
1430{
1431 struct ring_buffer *buffer;
1432 unsigned long flags;
1433
1434 /* If global, we need to also stop the max tracer */
1435 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1436 return tracing_stop();
1437
1438 raw_spin_lock_irqsave(&tr->start_lock, flags);
1439 if (tr->stop_count++)
1440 goto out;
1441
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001442 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001443 if (buffer)
1444 ring_buffer_record_disable(buffer);
1445
1446 out:
1447 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001448}
1449
Ingo Molnare309b412008-05-12 21:20:51 +02001450void trace_stop_cmdline_recording(void);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001451
Ingo Molnare309b412008-05-12 21:20:51 +02001452static void trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001453{
Carsten Emdea635cf02009-03-18 09:00:41 +01001454 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001455
1456 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1457 return;
1458
1459 /*
1460 * It's not the end of the world if we don't get
1461 * the lock, but we also don't want to spin
1462 * nor do we want to disable interrupts,
1463 * so if we miss here, then better luck next time.
1464 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001465 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001466 return;
1467
1468 idx = map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001469 if (idx == NO_CMDLINE_MAP) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001470 idx = (cmdline_idx + 1) % SAVED_CMDLINES;
1471
Carsten Emdea635cf02009-03-18 09:00:41 +01001472 /*
1473 * Check whether the cmdline buffer at idx has a pid
1474 * mapped. We are going to overwrite that entry so we
1475 * need to clear the map_pid_to_cmdline. Otherwise we
1476 * would read the new comm for the old pid.
1477 */
1478 pid = map_cmdline_to_pid[idx];
1479 if (pid != NO_CMDLINE_MAP)
1480 map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001481
Carsten Emdea635cf02009-03-18 09:00:41 +01001482 map_cmdline_to_pid[idx] = tsk->pid;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001483 map_pid_to_cmdline[tsk->pid] = idx;
1484
1485 cmdline_idx = idx;
1486 }
1487
1488 memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
1489
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001490 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001491}
1492
Steven Rostedt4ca53082009-03-16 19:20:15 -04001493void trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001494{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001495 unsigned map;
1496
Steven Rostedt4ca53082009-03-16 19:20:15 -04001497 if (!pid) {
1498 strcpy(comm, "<idle>");
1499 return;
1500 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001501
Steven Rostedt74bf4072010-01-25 15:11:53 -05001502 if (WARN_ON_ONCE(pid < 0)) {
1503 strcpy(comm, "<XXX>");
1504 return;
1505 }
1506
Steven Rostedt4ca53082009-03-16 19:20:15 -04001507 if (pid > PID_MAX_DEFAULT) {
1508 strcpy(comm, "<...>");
1509 return;
1510 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001511
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001512 preempt_disable();
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001513 arch_spin_lock(&trace_cmdline_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001514 map = map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001515 if (map != NO_CMDLINE_MAP)
1516 strcpy(comm, saved_cmdlines[map]);
1517 else
1518 strcpy(comm, "<...>");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001519
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001520 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001521 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001522}
1523
Ingo Molnare309b412008-05-12 21:20:51 +02001524void tracing_record_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001525{
Steven Rostedt0fb96562012-05-11 14:25:30 -04001526 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001527 return;
1528
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001529 if (!__this_cpu_read(trace_cmdline_save))
1530 return;
1531
1532 __this_cpu_write(trace_cmdline_save, false);
1533
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001534 trace_save_cmdline(tsk);
1535}
1536
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03001537void
Steven Rostedt38697052008-10-01 13:14:09 -04001538tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1539 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001540{
1541 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001542
Steven Rostedt777e2082008-09-29 23:02:42 -04001543 entry->preempt_count = pc & 0xff;
1544 entry->pid = (tsk) ? tsk->pid : 0;
1545 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04001546#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04001547 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04001548#else
1549 TRACE_FLAG_IRQS_NOSUPPORT |
1550#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001551 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1552 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02001553 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1554 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001555}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02001556EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001557
Steven Rostedte77405a2009-09-02 14:17:06 -04001558struct ring_buffer_event *
1559trace_buffer_lock_reserve(struct ring_buffer *buffer,
1560 int type,
1561 unsigned long len,
1562 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001563{
1564 struct ring_buffer_event *event;
1565
Steven Rostedte77405a2009-09-02 14:17:06 -04001566 event = ring_buffer_lock_reserve(buffer, len);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001567 if (event != NULL) {
1568 struct trace_entry *ent = ring_buffer_event_data(event);
1569
1570 tracing_generic_entry_update(ent, flags, pc);
1571 ent->type = type;
1572 }
1573
1574 return event;
1575}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001576
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001577void
1578__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1579{
1580 __this_cpu_write(trace_cmdline_save, true);
1581 ring_buffer_unlock_commit(buffer, event);
1582}
1583
Steven Rostedte77405a2009-09-02 14:17:06 -04001584static inline void
1585__trace_buffer_unlock_commit(struct ring_buffer *buffer,
1586 struct ring_buffer_event *event,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001587 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001588{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001589 __buffer_unlock_commit(buffer, event);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001590
Steven Rostedte77405a2009-09-02 14:17:06 -04001591 ftrace_trace_stack(buffer, flags, 6, pc);
1592 ftrace_trace_userstack(buffer, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001593}
1594
Steven Rostedte77405a2009-09-02 14:17:06 -04001595void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1596 struct ring_buffer_event *event,
1597 unsigned long flags, int pc)
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001598{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001599 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001600}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001601EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001602
Steven Rostedtef5580d2009-02-27 19:38:04 -05001603struct ring_buffer_event *
Steven Rostedtccb469a2012-08-02 10:32:10 -04001604trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1605 struct ftrace_event_file *ftrace_file,
1606 int type, unsigned long len,
1607 unsigned long flags, int pc)
1608{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001609 *current_rb = ftrace_file->tr->trace_buffer.buffer;
Steven Rostedtccb469a2012-08-02 10:32:10 -04001610 return trace_buffer_lock_reserve(*current_rb,
1611 type, len, flags, pc);
1612}
1613EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1614
1615struct ring_buffer_event *
Steven Rostedte77405a2009-09-02 14:17:06 -04001616trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1617 int type, unsigned long len,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001618 unsigned long flags, int pc)
1619{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001620 *current_rb = global_trace.trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04001621 return trace_buffer_lock_reserve(*current_rb,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001622 type, len, flags, pc);
1623}
Steven Rostedt94487d62009-05-05 19:22:53 -04001624EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001625
Steven Rostedte77405a2009-09-02 14:17:06 -04001626void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1627 struct ring_buffer_event *event,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001628 unsigned long flags, int pc)
1629{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001630 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001631}
Steven Rostedt94487d62009-05-05 19:22:53 -04001632EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001633
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001634void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1635 struct ring_buffer_event *event,
1636 unsigned long flags, int pc,
1637 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001638{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001639 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001640
1641 ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1642 ftrace_trace_userstack(buffer, flags, pc);
1643}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001644EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001645
Steven Rostedte77405a2009-09-02 14:17:06 -04001646void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1647 struct ring_buffer_event *event)
Steven Rostedt77d9f462009-04-02 01:16:59 -04001648{
Steven Rostedte77405a2009-09-02 14:17:06 -04001649 ring_buffer_discard_commit(buffer, event);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001650}
Steven Rostedt12acd472009-04-17 16:01:56 -04001651EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001652
Ingo Molnare309b412008-05-12 21:20:51 +02001653void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001654trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04001655 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1656 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001657{
Tom Zanussie1112b42009-03-31 00:48:49 -05001658 struct ftrace_event_call *call = &event_function;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001659 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001660 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001661 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001662
Steven Rostedtd7690412008-10-01 00:29:53 -04001663 /* If we are reading the ring buffer, don't trace */
Rusty Russelldd17c8f2009-10-29 22:34:15 +09001664 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
Steven Rostedtd7690412008-10-01 00:29:53 -04001665 return;
1666
Steven Rostedte77405a2009-09-02 14:17:06 -04001667 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001668 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001669 if (!event)
1670 return;
1671 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04001672 entry->ip = ip;
1673 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05001674
Tom Zanussif306cc82013-10-24 08:34:17 -05001675 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001676 __buffer_unlock_commit(buffer, event);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001677}
1678
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001679#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001680
1681#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1682struct ftrace_stack {
1683 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1684};
1685
1686static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1687static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1688
Steven Rostedte77405a2009-09-02 14:17:06 -04001689static void __ftrace_trace_stack(struct ring_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05001690 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001691 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02001692{
Tom Zanussie1112b42009-03-31 00:48:49 -05001693 struct ftrace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001694 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001695 struct stack_entry *entry;
Ingo Molnar86387f72008-05-12 21:20:51 +02001696 struct stack_trace trace;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001697 int use_stack;
1698 int size = FTRACE_STACK_ENTRIES;
Ingo Molnar86387f72008-05-12 21:20:51 +02001699
1700 trace.nr_entries = 0;
Ingo Molnar86387f72008-05-12 21:20:51 +02001701 trace.skip = skip;
Ingo Molnar86387f72008-05-12 21:20:51 +02001702
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001703 /*
1704 * Since events can happen in NMIs there's no safe way to
1705 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1706 * or NMI comes in, it will just have to use the default
1707 * FTRACE_STACK_SIZE.
1708 */
1709 preempt_disable_notrace();
1710
Shan Wei82146522012-11-19 13:21:01 +08001711 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001712 /*
1713 * We don't need any atomic variables, just a barrier.
1714 * If an interrupt comes in, we don't care, because it would
1715 * have exited and put the counter back to what we want.
1716 * We just need a barrier to keep gcc from moving things
1717 * around.
1718 */
1719 barrier();
1720 if (use_stack == 1) {
1721 trace.entries = &__get_cpu_var(ftrace_stack).calls[0];
1722 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1723
1724 if (regs)
1725 save_stack_trace_regs(regs, &trace);
1726 else
1727 save_stack_trace(&trace);
1728
1729 if (trace.nr_entries > size)
1730 size = trace.nr_entries;
1731 } else
1732 /* From now on, use_stack is a boolean */
1733 use_stack = 0;
1734
1735 size *= sizeof(unsigned long);
1736
1737 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1738 sizeof(*entry) + size, flags, pc);
1739 if (!event)
1740 goto out;
1741 entry = ring_buffer_event_data(event);
1742
1743 memset(&entry->caller, 0, size);
1744
1745 if (use_stack)
1746 memcpy(&entry->caller, trace.entries,
1747 trace.nr_entries * sizeof(unsigned long));
1748 else {
1749 trace.max_entries = FTRACE_STACK_ENTRIES;
1750 trace.entries = entry->caller;
1751 if (regs)
1752 save_stack_trace_regs(regs, &trace);
1753 else
1754 save_stack_trace(&trace);
1755 }
1756
1757 entry->size = trace.nr_entries;
1758
Tom Zanussif306cc82013-10-24 08:34:17 -05001759 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001760 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001761
1762 out:
1763 /* Again, don't let gcc optimize things here */
1764 barrier();
Shan Wei82146522012-11-19 13:21:01 +08001765 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001766 preempt_enable_notrace();
1767
Ingo Molnarf0a920d2008-05-12 21:20:47 +02001768}
1769
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001770void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1771 int skip, int pc, struct pt_regs *regs)
1772{
1773 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1774 return;
1775
1776 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1777}
1778
Steven Rostedte77405a2009-09-02 14:17:06 -04001779void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1780 int skip, int pc)
Steven Rostedt53614992009-01-15 19:12:40 -05001781{
1782 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1783 return;
1784
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001785 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
Steven Rostedt53614992009-01-15 19:12:40 -05001786}
1787
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001788void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1789 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04001790{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001791 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
Steven Rostedt38697052008-10-01 13:14:09 -04001792}
1793
Steven Rostedt03889382009-12-11 09:48:22 -05001794/**
1795 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001796 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05001797 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001798void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05001799{
1800 unsigned long flags;
1801
1802 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05001803 return;
Steven Rostedt03889382009-12-11 09:48:22 -05001804
1805 local_save_flags(flags);
1806
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001807 /*
1808 * Skip 3 more, seems to get us at the caller of
1809 * this function.
1810 */
1811 skip += 3;
1812 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1813 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05001814}
1815
Steven Rostedt91e86e52010-11-10 12:56:12 +01001816static DEFINE_PER_CPU(int, user_stack_count);
1817
Steven Rostedte77405a2009-09-02 14:17:06 -04001818void
1819ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02001820{
Tom Zanussie1112b42009-03-31 00:48:49 -05001821 struct ftrace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02001822 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02001823 struct userstack_entry *entry;
1824 struct stack_trace trace;
Török Edwin02b67512008-11-22 13:28:47 +02001825
1826 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1827 return;
1828
Steven Rostedtb6345872010-03-12 20:03:30 -05001829 /*
1830 * NMIs can not handle page faults, even with fix ups.
1831 * The save user stack can (and often does) fault.
1832 */
1833 if (unlikely(in_nmi()))
1834 return;
1835
Steven Rostedt91e86e52010-11-10 12:56:12 +01001836 /*
1837 * prevent recursion, since the user stack tracing may
1838 * trigger other kernel events.
1839 */
1840 preempt_disable();
1841 if (__this_cpu_read(user_stack_count))
1842 goto out;
1843
1844 __this_cpu_inc(user_stack_count);
1845
Steven Rostedte77405a2009-09-02 14:17:06 -04001846 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001847 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02001848 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08001849 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02001850 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02001851
Steven Rostedt48659d32009-09-11 11:36:23 -04001852 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02001853 memset(&entry->caller, 0, sizeof(entry->caller));
1854
1855 trace.nr_entries = 0;
1856 trace.max_entries = FTRACE_STACK_ENTRIES;
1857 trace.skip = 0;
1858 trace.entries = entry->caller;
1859
1860 save_stack_trace_user(&trace);
Tom Zanussif306cc82013-10-24 08:34:17 -05001861 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001862 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001863
Li Zefan1dbd1952010-12-09 15:47:56 +08001864 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01001865 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001866 out:
1867 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02001868}
1869
Hannes Eder4fd27352009-02-10 19:44:12 +01001870#ifdef UNUSED
1871static void __trace_userstack(struct trace_array *tr, unsigned long flags)
Török Edwin02b67512008-11-22 13:28:47 +02001872{
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001873 ftrace_trace_userstack(tr, flags, preempt_count());
Török Edwin02b67512008-11-22 13:28:47 +02001874}
Hannes Eder4fd27352009-02-10 19:44:12 +01001875#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02001876
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001877#endif /* CONFIG_STACKTRACE */
1878
Steven Rostedt07d777f2011-09-22 14:01:55 -04001879/* created for use with alloc_percpu */
1880struct trace_buffer_struct {
1881 char buffer[TRACE_BUF_SIZE];
1882};
1883
1884static struct trace_buffer_struct *trace_percpu_buffer;
1885static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1886static struct trace_buffer_struct *trace_percpu_irq_buffer;
1887static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1888
1889/*
1890 * The buffer used is dependent on the context. There is a per cpu
1891 * buffer for normal context, softirq contex, hard irq context and
1892 * for NMI context. Thise allows for lockless recording.
1893 *
1894 * Note, if the buffers failed to be allocated, then this returns NULL
1895 */
1896static char *get_trace_buf(void)
1897{
1898 struct trace_buffer_struct *percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04001899
1900 /*
1901 * If we have allocated per cpu buffers, then we do not
1902 * need to do any locking.
1903 */
1904 if (in_nmi())
1905 percpu_buffer = trace_percpu_nmi_buffer;
1906 else if (in_irq())
1907 percpu_buffer = trace_percpu_irq_buffer;
1908 else if (in_softirq())
1909 percpu_buffer = trace_percpu_sirq_buffer;
1910 else
1911 percpu_buffer = trace_percpu_buffer;
1912
1913 if (!percpu_buffer)
1914 return NULL;
1915
Shan Weid8a03492012-11-13 09:53:04 +08001916 return this_cpu_ptr(&percpu_buffer->buffer[0]);
Steven Rostedt07d777f2011-09-22 14:01:55 -04001917}
1918
1919static int alloc_percpu_trace_buffer(void)
1920{
1921 struct trace_buffer_struct *buffers;
1922 struct trace_buffer_struct *sirq_buffers;
1923 struct trace_buffer_struct *irq_buffers;
1924 struct trace_buffer_struct *nmi_buffers;
1925
1926 buffers = alloc_percpu(struct trace_buffer_struct);
1927 if (!buffers)
1928 goto err_warn;
1929
1930 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1931 if (!sirq_buffers)
1932 goto err_sirq;
1933
1934 irq_buffers = alloc_percpu(struct trace_buffer_struct);
1935 if (!irq_buffers)
1936 goto err_irq;
1937
1938 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
1939 if (!nmi_buffers)
1940 goto err_nmi;
1941
1942 trace_percpu_buffer = buffers;
1943 trace_percpu_sirq_buffer = sirq_buffers;
1944 trace_percpu_irq_buffer = irq_buffers;
1945 trace_percpu_nmi_buffer = nmi_buffers;
1946
1947 return 0;
1948
1949 err_nmi:
1950 free_percpu(irq_buffers);
1951 err_irq:
1952 free_percpu(sirq_buffers);
1953 err_sirq:
1954 free_percpu(buffers);
1955 err_warn:
1956 WARN(1, "Could not allocate percpu trace_printk buffer");
1957 return -ENOMEM;
1958}
1959
Steven Rostedt81698832012-10-11 10:15:05 -04001960static int buffers_allocated;
1961
Steven Rostedt07d777f2011-09-22 14:01:55 -04001962void trace_printk_init_buffers(void)
1963{
Steven Rostedt07d777f2011-09-22 14:01:55 -04001964 if (buffers_allocated)
1965 return;
1966
1967 if (alloc_percpu_trace_buffer())
1968 return;
1969
1970 pr_info("ftrace: Allocated trace_printk buffers\n");
1971
Steven Rostedtb382ede62012-10-10 21:44:34 -04001972 /* Expand the buffers to set size */
1973 tracing_update_buffers();
1974
Steven Rostedt07d777f2011-09-22 14:01:55 -04001975 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04001976
1977 /*
1978 * trace_printk_init_buffers() can be called by modules.
1979 * If that happens, then we need to start cmdline recording
1980 * directly here. If the global_trace.buffer is already
1981 * allocated here, then this was called by module code.
1982 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001983 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04001984 tracing_start_cmdline_record();
1985}
1986
1987void trace_printk_start_comm(void)
1988{
1989 /* Start tracing comms if trace printk is set */
1990 if (!buffers_allocated)
1991 return;
1992 tracing_start_cmdline_record();
1993}
1994
1995static void trace_printk_start_stop_comm(int enabled)
1996{
1997 if (!buffers_allocated)
1998 return;
1999
2000 if (enabled)
2001 tracing_start_cmdline_record();
2002 else
2003 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002004}
2005
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002006/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002007 * trace_vbprintk - write binary msg to tracing buffer
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002008 *
2009 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04002010int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002011{
Tom Zanussie1112b42009-03-31 00:48:49 -05002012 struct ftrace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002013 struct ring_buffer_event *event;
Steven Rostedte77405a2009-09-02 14:17:06 -04002014 struct ring_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002015 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002016 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002017 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002018 char *tbuffer;
2019 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002020
2021 if (unlikely(tracing_selftest_running || tracing_disabled))
2022 return 0;
2023
2024 /* Don't pollute graph traces with trace_vprintk internals */
2025 pause_graph_tracing();
2026
2027 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04002028 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002029
Steven Rostedt07d777f2011-09-22 14:01:55 -04002030 tbuffer = get_trace_buf();
2031 if (!tbuffer) {
2032 len = 0;
2033 goto out;
2034 }
2035
2036 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2037
2038 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002039 goto out;
2040
Steven Rostedt07d777f2011-09-22 14:01:55 -04002041 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002042 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002043 buffer = tr->trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04002044 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2045 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002046 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002047 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002048 entry = ring_buffer_event_data(event);
2049 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002050 entry->fmt = fmt;
2051
Steven Rostedt07d777f2011-09-22 14:01:55 -04002052 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05002053 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002054 __buffer_unlock_commit(buffer, event);
Steven Rostedtd9313692010-01-06 17:27:11 -05002055 ftrace_trace_stack(buffer, flags, 6, pc);
2056 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002057
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002058out:
Steven Rostedt5168ae52010-06-03 09:36:50 -04002059 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002060 unpause_graph_tracing();
2061
2062 return len;
2063}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002064EXPORT_SYMBOL_GPL(trace_vbprintk);
2065
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002066static int
2067__trace_array_vprintk(struct ring_buffer *buffer,
2068 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002069{
Tom Zanussie1112b42009-03-31 00:48:49 -05002070 struct ftrace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002071 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002072 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002073 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002074 unsigned long flags;
2075 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002076
2077 if (tracing_disabled || tracing_selftest_running)
2078 return 0;
2079
Steven Rostedt07d777f2011-09-22 14:01:55 -04002080 /* Don't pollute graph traces with trace_vprintk internals */
2081 pause_graph_tracing();
2082
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002083 pc = preempt_count();
2084 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002085
Steven Rostedt07d777f2011-09-22 14:01:55 -04002086
2087 tbuffer = get_trace_buf();
2088 if (!tbuffer) {
2089 len = 0;
2090 goto out;
2091 }
2092
2093 len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2094 if (len > TRACE_BUF_SIZE)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002095 goto out;
2096
Steven Rostedt07d777f2011-09-22 14:01:55 -04002097 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002098 size = sizeof(*entry) + len + 1;
Steven Rostedte77405a2009-09-02 14:17:06 -04002099 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
Steven Rostedt07d777f2011-09-22 14:01:55 -04002100 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002101 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002102 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002103 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002104 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002105
Steven Rostedt07d777f2011-09-22 14:01:55 -04002106 memcpy(&entry->buf, tbuffer, len);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002107 entry->buf[len] = '\0';
Tom Zanussif306cc82013-10-24 08:34:17 -05002108 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002109 __buffer_unlock_commit(buffer, event);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002110 ftrace_trace_stack(buffer, flags, 6, pc);
Steven Rostedtd9313692010-01-06 17:27:11 -05002111 }
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002112 out:
2113 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002114 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002115
2116 return len;
2117}
Steven Rostedt659372d2009-09-03 19:11:07 -04002118
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002119int trace_array_vprintk(struct trace_array *tr,
2120 unsigned long ip, const char *fmt, va_list args)
2121{
2122 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2123}
2124
2125int trace_array_printk(struct trace_array *tr,
2126 unsigned long ip, const char *fmt, ...)
2127{
2128 int ret;
2129 va_list ap;
2130
2131 if (!(trace_flags & TRACE_ITER_PRINTK))
2132 return 0;
2133
2134 va_start(ap, fmt);
2135 ret = trace_array_vprintk(tr, ip, fmt, ap);
2136 va_end(ap);
2137 return ret;
2138}
2139
2140int trace_array_printk_buf(struct ring_buffer *buffer,
2141 unsigned long ip, const char *fmt, ...)
2142{
2143 int ret;
2144 va_list ap;
2145
2146 if (!(trace_flags & TRACE_ITER_PRINTK))
2147 return 0;
2148
2149 va_start(ap, fmt);
2150 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2151 va_end(ap);
2152 return ret;
2153}
2154
Steven Rostedt659372d2009-09-03 19:11:07 -04002155int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2156{
Steven Rostedta813a152009-10-09 01:41:35 -04002157 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04002158}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002159EXPORT_SYMBOL_GPL(trace_vprintk);
2160
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002161static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04002162{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002163 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2164
Steven Rostedt5a90f572008-09-03 17:42:51 -04002165 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002166 if (buf_iter)
2167 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04002168}
2169
Ingo Molnare309b412008-05-12 21:20:51 +02002170static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002171peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2172 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002173{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002174 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002175 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002176
Steven Rostedtd7690412008-10-01 00:29:53 -04002177 if (buf_iter)
2178 event = ring_buffer_iter_peek(buf_iter, ts);
2179 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002180 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002181 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04002182
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002183 if (event) {
2184 iter->ent_size = ring_buffer_event_length(event);
2185 return ring_buffer_event_data(event);
2186 }
2187 iter->ent_size = 0;
2188 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002189}
Steven Rostedtd7690412008-10-01 00:29:53 -04002190
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002191static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002192__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2193 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002194{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002195 struct ring_buffer *buffer = iter->trace_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002196 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08002197 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002198 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002199 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002200 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002201 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002202 int cpu;
2203
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002204 /*
2205 * If we are in a per_cpu trace file, don't bother by iterating over
2206 * all cpu and peek directly.
2207 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002208 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002209 if (ring_buffer_empty_cpu(buffer, cpu_file))
2210 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002211 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002212 if (ent_cpu)
2213 *ent_cpu = cpu_file;
2214
2215 return ent;
2216 }
2217
Steven Rostedtab464282008-05-12 21:21:00 +02002218 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002219
2220 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002221 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002222
Steven Rostedtbc21b472010-03-31 19:49:26 -04002223 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002224
Ingo Molnarcdd31cd2008-05-12 21:20:46 +02002225 /*
2226 * Pick the entry with the smallest timestamp:
2227 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002228 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002229 next = ent;
2230 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002231 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002232 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002233 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002234 }
2235 }
2236
Steven Rostedt12b5da32012-03-27 10:43:28 -04002237 iter->ent_size = next_size;
2238
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002239 if (ent_cpu)
2240 *ent_cpu = next_cpu;
2241
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002242 if (ent_ts)
2243 *ent_ts = next_ts;
2244
Steven Rostedtbc21b472010-03-31 19:49:26 -04002245 if (missing_events)
2246 *missing_events = next_lost;
2247
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002248 return next;
2249}
2250
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002251/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002252struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2253 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002254{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002255 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002256}
Ingo Molnar8c523a92008-05-12 21:20:46 +02002257
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002258/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05002259void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002260{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002261 iter->ent = __find_next_entry(iter, &iter->cpu,
2262 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02002263
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002264 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002265 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002266
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002267 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02002268}
2269
Ingo Molnare309b412008-05-12 21:20:51 +02002270static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002271{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002272 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002273 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002274}
2275
Ingo Molnare309b412008-05-12 21:20:51 +02002276static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002277{
2278 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002279 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002280 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002281
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002282 WARN_ON_ONCE(iter->leftover);
2283
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002284 (*pos)++;
2285
2286 /* can't go backwards */
2287 if (iter->idx > i)
2288 return NULL;
2289
2290 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05002291 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002292 else
2293 ent = iter;
2294
2295 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05002296 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002297
2298 iter->pos = *pos;
2299
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002300 return ent;
2301}
2302
Jason Wessel955b61e2010-08-05 09:22:23 -05002303void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002304{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002305 struct ring_buffer_event *event;
2306 struct ring_buffer_iter *buf_iter;
2307 unsigned long entries = 0;
2308 u64 ts;
2309
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002310 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002311
Steven Rostedt6d158a82012-06-27 20:46:14 -04002312 buf_iter = trace_buffer_iter(iter, cpu);
2313 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002314 return;
2315
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002316 ring_buffer_iter_reset(buf_iter);
2317
2318 /*
2319 * We could have the case with the max latency tracers
2320 * that a reset never took place on a cpu. This is evident
2321 * by the timestamp being before the start of the buffer.
2322 */
2323 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002324 if (ts >= iter->trace_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002325 break;
2326 entries++;
2327 ring_buffer_read(buf_iter, NULL);
2328 }
2329
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002330 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002331}
2332
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002333/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002334 * The current tracer is copied to avoid a global locking
2335 * all around.
2336 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002337static void *s_start(struct seq_file *m, loff_t *pos)
2338{
2339 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002340 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002341 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002342 void *p = NULL;
2343 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002344 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002345
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09002346 /*
2347 * copy the tracer to avoid using a global lock all around.
2348 * iter->trace is a copy of current_trace, the pointer to the
2349 * name may be used instead of a strcmp(), as iter->trace->name
2350 * will point to the same string as current_trace->name.
2351 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002352 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002353 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2354 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002355 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002356
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002357#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002358 if (iter->snapshot && iter->trace->use_max_tr)
2359 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002360#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002361
2362 if (!iter->snapshot)
2363 atomic_inc(&trace_record_cmdline_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002364
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002365 if (*pos != iter->pos) {
2366 iter->ent = NULL;
2367 iter->cpu = 0;
2368 iter->idx = -1;
2369
Steven Rostedtae3b5092013-01-23 15:22:59 -05002370 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002371 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002372 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002373 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002374 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002375
Lai Jiangshanac91d852010-03-02 17:54:50 +08002376 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002377 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2378 ;
2379
2380 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002381 /*
2382 * If we overflowed the seq_file before, then we want
2383 * to just reuse the trace_seq buffer again.
2384 */
2385 if (iter->leftover)
2386 p = iter;
2387 else {
2388 l = *pos - 1;
2389 p = s_next(m, p, &l);
2390 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002391 }
2392
Lai Jiangshan4f535962009-05-18 19:35:34 +08002393 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002394 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002395 return p;
2396}
2397
2398static void s_stop(struct seq_file *m, void *p)
2399{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002400 struct trace_iterator *iter = m->private;
2401
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002402#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002403 if (iter->snapshot && iter->trace->use_max_tr)
2404 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002405#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002406
2407 if (!iter->snapshot)
2408 atomic_dec(&trace_record_cmdline_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002409
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002410 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08002411 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002412}
2413
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002414static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002415get_total_entries(struct trace_buffer *buf,
2416 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002417{
2418 unsigned long count;
2419 int cpu;
2420
2421 *total = 0;
2422 *entries = 0;
2423
2424 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002425 count = ring_buffer_entries_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002426 /*
2427 * If this buffer has skipped entries, then we hold all
2428 * entries for the trace and we need to ignore the
2429 * ones before the time stamp.
2430 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002431 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2432 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002433 /* total is the same as the entries */
2434 *total += count;
2435 } else
2436 *total += count +
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002437 ring_buffer_overrun_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002438 *entries += count;
2439 }
2440}
2441
Ingo Molnare309b412008-05-12 21:20:51 +02002442static void print_lat_help_header(struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002443{
Michael Ellermana6168352008-08-20 16:36:11 -07002444 seq_puts(m, "# _------=> CPU# \n");
2445 seq_puts(m, "# / _-----=> irqs-off \n");
2446 seq_puts(m, "# | / _----=> need-resched \n");
2447 seq_puts(m, "# || / _---=> hardirq/softirq \n");
2448 seq_puts(m, "# ||| / _--=> preempt-depth \n");
Steven Rostedte6e1e252011-03-09 10:41:56 -05002449 seq_puts(m, "# |||| / delay \n");
2450 seq_puts(m, "# cmd pid ||||| time | caller \n");
2451 seq_puts(m, "# \\ / ||||| \\ | / \n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002452}
2453
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002454static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002455{
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002456 unsigned long total;
2457 unsigned long entries;
2458
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002459 get_total_entries(buf, &total, &entries);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002460 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2461 entries, total, num_online_cpus());
2462 seq_puts(m, "#\n");
2463}
2464
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002465static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002466{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002467 print_event_info(buf, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002468 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
Michael Ellermana6168352008-08-20 16:36:11 -07002469 seq_puts(m, "# | | | | |\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002470}
2471
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002472static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt77271ce2011-11-17 09:34:33 -05002473{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002474 print_event_info(buf, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002475 seq_puts(m, "# _-----=> irqs-off\n");
2476 seq_puts(m, "# / _----=> need-resched\n");
2477 seq_puts(m, "# | / _---=> hardirq/softirq\n");
2478 seq_puts(m, "# || / _--=> preempt-depth\n");
2479 seq_puts(m, "# ||| / delay\n");
2480 seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n");
2481 seq_puts(m, "# | | | |||| | |\n");
2482}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002483
Jiri Olsa62b915f2010-04-02 19:01:22 +02002484void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002485print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2486{
2487 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002488 struct trace_buffer *buf = iter->trace_buffer;
2489 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002490 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002491 unsigned long entries;
2492 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002493 const char *name = "preemption";
2494
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05002495 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002496
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002497 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002498
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002499 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002500 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002501 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002502 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002503 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002504 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02002505 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002506 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02002507 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002508 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002509#if defined(CONFIG_PREEMPT_NONE)
2510 "server",
2511#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2512 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04002513#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002514 "preempt",
2515#else
2516 "unknown",
2517#endif
2518 /* These are reserved for later use */
2519 0, 0, 0, 0);
2520#ifdef CONFIG_SMP
2521 seq_printf(m, " #P:%d)\n", num_online_cpus());
2522#else
2523 seq_puts(m, ")\n");
2524#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002525 seq_puts(m, "# -----------------\n");
2526 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002527 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07002528 data->comm, data->pid,
2529 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002530 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002531 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002532
2533 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002534 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002535 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2536 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002537 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002538 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2539 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04002540 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002541 }
2542
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002543 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002544}
2545
Steven Rostedta3097202008-11-07 22:36:02 -05002546static void test_cpu_buff_start(struct trace_iterator *iter)
2547{
2548 struct trace_seq *s = &iter->seq;
2549
Steven Rostedt12ef7d42008-11-12 17:52:38 -05002550 if (!(trace_flags & TRACE_ITER_ANNOTATE))
2551 return;
2552
2553 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2554 return;
2555
Rusty Russell44623442009-01-01 10:12:23 +10302556 if (cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05002557 return;
2558
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002559 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002560 return;
2561
Rusty Russell44623442009-01-01 10:12:23 +10302562 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02002563
2564 /* Don't print started cpu buffer for the first entry of the trace */
2565 if (iter->idx > 1)
2566 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2567 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05002568}
2569
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002570static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002571{
Steven Rostedt214023c2008-05-12 21:20:46 +02002572 struct trace_seq *s = &iter->seq;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002573 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002574 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002575 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002576
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002577 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002578
Steven Rostedta3097202008-11-07 22:36:02 -05002579 test_cpu_buff_start(iter);
2580
Steven Rostedtf633cef2008-12-23 23:24:13 -05002581 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002582
2583 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt27d48be2009-03-04 21:57:29 -05002584 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2585 if (!trace_print_lat_context(iter))
2586 goto partial;
2587 } else {
2588 if (!trace_print_context(iter))
2589 goto partial;
2590 }
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002591 }
2592
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002593 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04002594 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002595
2596 if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
2597 goto partial;
Steven Rostedt7104f302008-10-01 10:52:51 -04002598
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002599 return TRACE_TYPE_HANDLED;
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002600partial:
2601 return TRACE_TYPE_PARTIAL_LINE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002602}
2603
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002604static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002605{
2606 struct trace_seq *s = &iter->seq;
2607 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002608 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002609
2610 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002611
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002612 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002613 if (!trace_seq_printf(s, "%d %d %llu ",
2614 entry->pid, iter->cpu, iter->ts))
2615 goto partial;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002616 }
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002617
Steven Rostedtf633cef2008-12-23 23:24:13 -05002618 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002619 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04002620 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002621
2622 if (!trace_seq_printf(s, "%d ?\n", entry->type))
2623 goto partial;
Steven Rostedt7104f302008-10-01 10:52:51 -04002624
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002625 return TRACE_TYPE_HANDLED;
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002626partial:
2627 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002628}
2629
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002630static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002631{
2632 struct trace_seq *s = &iter->seq;
2633 unsigned char newline = '\n';
2634 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002635 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002636
2637 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002638
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002639 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2640 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
2641 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
2642 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
2643 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002644
Steven Rostedtf633cef2008-12-23 23:24:13 -05002645 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002646 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04002647 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002648 if (ret != TRACE_TYPE_HANDLED)
2649 return ret;
2650 }
Steven Rostedt7104f302008-10-01 10:52:51 -04002651
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002652 SEQ_PUT_FIELD_RET(s, newline);
2653
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002654 return TRACE_TYPE_HANDLED;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002655}
2656
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002657static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002658{
2659 struct trace_seq *s = &iter->seq;
2660 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002661 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002662
2663 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002664
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002665 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2666 SEQ_PUT_FIELD_RET(s, entry->pid);
Steven Rostedt1830b52d2009-02-07 19:38:43 -05002667 SEQ_PUT_FIELD_RET(s, iter->cpu);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002668 SEQ_PUT_FIELD_RET(s, iter->ts);
2669 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002670
Steven Rostedtf633cef2008-12-23 23:24:13 -05002671 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04002672 return event ? event->funcs->binary(iter, 0, event) :
2673 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002674}
2675
Jiri Olsa62b915f2010-04-02 19:01:22 +02002676int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002677{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002678 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002679 int cpu;
2680
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002681 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002682 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002683 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002684 buf_iter = trace_buffer_iter(iter, cpu);
2685 if (buf_iter) {
2686 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002687 return 0;
2688 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002689 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002690 return 0;
2691 }
2692 return 1;
2693 }
2694
Steven Rostedtab464282008-05-12 21:21:00 +02002695 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04002696 buf_iter = trace_buffer_iter(iter, cpu);
2697 if (buf_iter) {
2698 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04002699 return 0;
2700 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002701 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04002702 return 0;
2703 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002704 }
Steven Rostedtd7690412008-10-01 00:29:53 -04002705
Frederic Weisbecker797d3712008-09-30 18:13:45 +02002706 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002707}
2708
Lai Jiangshan4f535962009-05-18 19:35:34 +08002709/* Called with trace_event_read_lock() held. */
Jason Wessel955b61e2010-08-05 09:22:23 -05002710enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002711{
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002712 enum print_line_t ret;
2713
Jiri Olsaee5e51f2011-03-25 12:05:18 +01002714 if (iter->lost_events &&
2715 !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2716 iter->cpu, iter->lost_events))
2717 return TRACE_TYPE_PARTIAL_LINE;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002718
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002719 if (iter->trace && iter->trace->print_line) {
2720 ret = iter->trace->print_line(iter);
2721 if (ret != TRACE_TYPE_UNHANDLED)
2722 return ret;
2723 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02002724
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05002725 if (iter->ent->type == TRACE_BPUTS &&
2726 trace_flags & TRACE_ITER_PRINTK &&
2727 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2728 return trace_print_bputs_msg_only(iter);
2729
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002730 if (iter->ent->type == TRACE_BPRINT &&
2731 trace_flags & TRACE_ITER_PRINTK &&
2732 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04002733 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002734
Frederic Weisbecker66896a82008-12-13 20:18:13 +01002735 if (iter->ent->type == TRACE_PRINT &&
2736 trace_flags & TRACE_ITER_PRINTK &&
2737 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04002738 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01002739
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002740 if (trace_flags & TRACE_ITER_BIN)
2741 return print_bin_fmt(iter);
2742
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002743 if (trace_flags & TRACE_ITER_HEX)
2744 return print_hex_fmt(iter);
2745
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002746 if (trace_flags & TRACE_ITER_RAW)
2747 return print_raw_fmt(iter);
2748
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002749 return print_trace_fmt(iter);
2750}
2751
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01002752void trace_latency_header(struct seq_file *m)
2753{
2754 struct trace_iterator *iter = m->private;
2755
2756 /* print nothing if the buffers are empty */
2757 if (trace_empty(iter))
2758 return;
2759
2760 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2761 print_trace_header(m, iter);
2762
2763 if (!(trace_flags & TRACE_ITER_VERBOSE))
2764 print_lat_help_header(m);
2765}
2766
Jiri Olsa62b915f2010-04-02 19:01:22 +02002767void trace_default_header(struct seq_file *m)
2768{
2769 struct trace_iterator *iter = m->private;
2770
Jiri Olsaf56e7f82011-06-03 16:58:49 +02002771 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2772 return;
2773
Jiri Olsa62b915f2010-04-02 19:01:22 +02002774 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2775 /* print nothing if the buffers are empty */
2776 if (trace_empty(iter))
2777 return;
2778 print_trace_header(m, iter);
2779 if (!(trace_flags & TRACE_ITER_VERBOSE))
2780 print_lat_help_header(m);
2781 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05002782 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2783 if (trace_flags & TRACE_ITER_IRQ_INFO)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002784 print_func_help_header_irq(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002785 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002786 print_func_help_header(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002787 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02002788 }
2789}
2790
Steven Rostedte0a413f2011-09-29 21:26:16 -04002791static void test_ftrace_alive(struct seq_file *m)
2792{
2793 if (!ftrace_is_dead())
2794 return;
2795 seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
2796 seq_printf(m, "# MAY BE MISSING FUNCTION EVENTS\n");
2797}
2798
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002799#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002800static void show_snapshot_main_help(struct seq_file *m)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002801{
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002802 seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
2803 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2804 seq_printf(m, "# Takes a snapshot of the main buffer.\n");
Wang YanQingb9be6d02013-09-14 12:59:16 +08002805 seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002806 seq_printf(m, "# (Doesn't have to be '2' works with any number that\n");
2807 seq_printf(m, "# is not a '0' or '1')\n");
2808}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002809
2810static void show_snapshot_percpu_help(struct seq_file *m)
2811{
2812 seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2813#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2814 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2815 seq_printf(m, "# Takes a snapshot of the main buffer for this cpu.\n");
2816#else
2817 seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
2818 seq_printf(m, "# Must use main snapshot file to allocate.\n");
2819#endif
2820 seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
2821 seq_printf(m, "# (Doesn't have to be '2' works with any number that\n");
2822 seq_printf(m, "# is not a '0' or '1')\n");
2823}
2824
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002825static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2826{
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05002827 if (iter->tr->allocated_snapshot)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002828 seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
2829 else
2830 seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
2831
2832 seq_printf(m, "# Snapshot commands:\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002833 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2834 show_snapshot_main_help(m);
2835 else
2836 show_snapshot_percpu_help(m);
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002837}
2838#else
2839/* Should never be called */
2840static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2841#endif
2842
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002843static int s_show(struct seq_file *m, void *v)
2844{
2845 struct trace_iterator *iter = v;
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002846 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002847
2848 if (iter->ent == NULL) {
2849 if (iter->tr) {
2850 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2851 seq_puts(m, "#\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04002852 test_ftrace_alive(m);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002853 }
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002854 if (iter->snapshot && trace_empty(iter))
2855 print_snapshot_help(m, iter);
2856 else if (iter->trace && iter->trace->print_header)
Markus Metzger8bba1bf2008-11-25 09:12:31 +01002857 iter->trace->print_header(m);
Jiri Olsa62b915f2010-04-02 19:01:22 +02002858 else
2859 trace_default_header(m);
2860
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002861 } else if (iter->leftover) {
2862 /*
2863 * If we filled the seq_file buffer earlier, we
2864 * want to just show it now.
2865 */
2866 ret = trace_print_seq(m, &iter->seq);
2867
2868 /* ret should this time be zero, but you never know */
2869 iter->leftover = ret;
2870
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002871 } else {
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002872 print_trace_line(iter);
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002873 ret = trace_print_seq(m, &iter->seq);
2874 /*
2875 * If we overflow the seq_file buffer, then it will
2876 * ask us for this data again at start up.
2877 * Use that instead.
2878 * ret is 0 if seq_file write succeeded.
2879 * -1 otherwise.
2880 */
2881 iter->leftover = ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002882 }
2883
2884 return 0;
2885}
2886
Oleg Nesterov649e9c72013-07-23 17:25:54 +02002887/*
2888 * Should be used after trace_array_get(), trace_types_lock
2889 * ensures that i_cdev was already initialized.
2890 */
2891static inline int tracing_get_cpu(struct inode *inode)
2892{
2893 if (inode->i_cdev) /* See trace_create_cpu_file() */
2894 return (long)inode->i_cdev - 1;
2895 return RING_BUFFER_ALL_CPUS;
2896}
2897
James Morris88e9d342009-09-22 16:43:43 -07002898static const struct seq_operations tracer_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02002899 .start = s_start,
2900 .next = s_next,
2901 .stop = s_stop,
2902 .show = s_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002903};
2904
Ingo Molnare309b412008-05-12 21:20:51 +02002905static struct trace_iterator *
Oleg Nesterov6484c712013-07-23 17:26:10 +02002906__tracing_open(struct inode *inode, struct file *file, bool snapshot)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002907{
Oleg Nesterov6484c712013-07-23 17:26:10 +02002908 struct trace_array *tr = inode->i_private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002909 struct trace_iterator *iter;
Jiri Olsa50e18b92012-04-25 10:23:39 +02002910 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002911
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002912 if (tracing_disabled)
2913 return ERR_PTR(-ENODEV);
Steven Rostedt60a11772008-05-12 21:20:44 +02002914
Jiri Olsa50e18b92012-04-25 10:23:39 +02002915 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002916 if (!iter)
2917 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002918
Steven Rostedt6d158a82012-06-27 20:46:14 -04002919 iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
2920 GFP_KERNEL);
Dan Carpenter93574fc2012-07-11 09:35:08 +03002921 if (!iter->buffer_iter)
2922 goto release;
2923
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002924 /*
2925 * We make a copy of the current tracer to avoid concurrent
2926 * changes on it while we are reading.
2927 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002928 mutex_lock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002929 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002930 if (!iter->trace)
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002931 goto fail;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002932
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002933 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002934
Li Zefan79f55992009-06-15 14:58:26 +08002935 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02002936 goto fail;
2937
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002938 iter->tr = tr;
2939
2940#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002941 /* Currently only the top directory has a snapshot */
2942 if (tr->current_trace->print_max || snapshot)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002943 iter->trace_buffer = &tr->max_buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002944 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002945#endif
2946 iter->trace_buffer = &tr->trace_buffer;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002947 iter->snapshot = snapshot;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002948 iter->pos = -1;
Oleg Nesterov6484c712013-07-23 17:26:10 +02002949 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002950 mutex_init(&iter->mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002951
Markus Metzger8bba1bf2008-11-25 09:12:31 +01002952 /* Notify the tracer early; before we stop tracing. */
2953 if (iter->trace && iter->trace->open)
Markus Metzgera93751c2008-12-11 13:53:26 +01002954 iter->trace->open(iter);
Markus Metzger8bba1bf2008-11-25 09:12:31 +01002955
Steven Rostedt12ef7d42008-11-12 17:52:38 -05002956 /* Annotate start of buffers if we had overruns */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002957 if (ring_buffer_overruns(iter->trace_buffer->buffer))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05002958 iter->iter_flags |= TRACE_FILE_ANNOTATE;
2959
David Sharp8be07092012-11-13 12:18:22 -08002960 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09002961 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08002962 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
2963
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002964 /* stop the trace while dumping if we are not opening "snapshot" */
2965 if (!iter->snapshot)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002966 tracing_stop_tr(tr);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002967
Steven Rostedtae3b5092013-01-23 15:22:59 -05002968 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002969 for_each_tracing_cpu(cpu) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002970 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002971 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07002972 }
2973 ring_buffer_read_prepare_sync();
2974 for_each_tracing_cpu(cpu) {
2975 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002976 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002977 }
2978 } else {
2979 cpu = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002980 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002981 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07002982 ring_buffer_read_prepare_sync();
2983 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002984 tracing_iter_reset(iter, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002985 }
2986
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002987 mutex_unlock(&trace_types_lock);
2988
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002989 return iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002990
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002991 fail:
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002992 mutex_unlock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002993 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04002994 kfree(iter->buffer_iter);
Dan Carpenter93574fc2012-07-11 09:35:08 +03002995release:
Jiri Olsa50e18b92012-04-25 10:23:39 +02002996 seq_release_private(inode, file);
2997 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002998}
2999
3000int tracing_open_generic(struct inode *inode, struct file *filp)
3001{
Steven Rostedt60a11772008-05-12 21:20:44 +02003002 if (tracing_disabled)
3003 return -ENODEV;
3004
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003005 filp->private_data = inode->i_private;
3006 return 0;
3007}
3008
Geyslan G. Bem2e864212013-10-18 21:15:54 -03003009bool tracing_is_disabled(void)
3010{
3011 return (tracing_disabled) ? true: false;
3012}
3013
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003014/*
3015 * Open and update trace_array ref count.
3016 * Must have the current trace_array passed to it.
3017 */
Steven Rostedt (Red Hat)dcc30222013-07-02 20:30:52 -04003018static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003019{
3020 struct trace_array *tr = inode->i_private;
3021
3022 if (tracing_disabled)
3023 return -ENODEV;
3024
3025 if (trace_array_get(tr) < 0)
3026 return -ENODEV;
3027
3028 filp->private_data = inode->i_private;
3029
3030 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003031}
3032
Hannes Eder4fd27352009-02-10 19:44:12 +01003033static int tracing_release(struct inode *inode, struct file *file)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003034{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003035 struct trace_array *tr = inode->i_private;
matt mooney907f2782010-09-27 19:04:53 -07003036 struct seq_file *m = file->private_data;
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003037 struct trace_iterator *iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003038 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003039
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003040 if (!(file->f_mode & FMODE_READ)) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003041 trace_array_put(tr);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003042 return 0;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003043 }
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003044
Oleg Nesterov6484c712013-07-23 17:26:10 +02003045 /* Writes do not use seq_file */
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003046 iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003047 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05003048
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003049 for_each_tracing_cpu(cpu) {
3050 if (iter->buffer_iter[cpu])
3051 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3052 }
3053
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003054 if (iter->trace && iter->trace->close)
3055 iter->trace->close(iter);
3056
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003057 if (!iter->snapshot)
3058 /* reenable tracing if it was previously enabled */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003059 tracing_start_tr(tr);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003060
3061 __trace_array_put(tr);
3062
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003063 mutex_unlock(&trace_types_lock);
3064
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003065 mutex_destroy(&iter->mutex);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003066 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003067 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003068 kfree(iter->buffer_iter);
Jiri Olsa50e18b92012-04-25 10:23:39 +02003069 seq_release_private(inode, file);
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003070
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003071 return 0;
3072}
3073
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003074static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3075{
3076 struct trace_array *tr = inode->i_private;
3077
3078 trace_array_put(tr);
3079 return 0;
3080}
3081
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003082static int tracing_single_release_tr(struct inode *inode, struct file *file)
3083{
3084 struct trace_array *tr = inode->i_private;
3085
3086 trace_array_put(tr);
3087
3088 return single_release(inode, file);
3089}
3090
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003091static int tracing_open(struct inode *inode, struct file *file)
3092{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003093 struct trace_array *tr = inode->i_private;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003094 struct trace_iterator *iter;
3095 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003096
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003097 if (trace_array_get(tr) < 0)
3098 return -ENODEV;
3099
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003100 /* If this file was open for write, then erase contents */
Oleg Nesterov6484c712013-07-23 17:26:10 +02003101 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3102 int cpu = tracing_get_cpu(inode);
3103
3104 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003105 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003106 else
Oleg Nesterov6484c712013-07-23 17:26:10 +02003107 tracing_reset(&tr->trace_buffer, cpu);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003108 }
3109
3110 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003111 iter = __tracing_open(inode, file, false);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003112 if (IS_ERR(iter))
3113 ret = PTR_ERR(iter);
3114 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
3115 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3116 }
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003117
3118 if (ret < 0)
3119 trace_array_put(tr);
3120
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003121 return ret;
3122}
3123
Ingo Molnare309b412008-05-12 21:20:51 +02003124static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003125t_next(struct seq_file *m, void *v, loff_t *pos)
3126{
Li Zefanf129e962009-06-24 09:53:44 +08003127 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003128
3129 (*pos)++;
3130
3131 if (t)
3132 t = t->next;
3133
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003134 return t;
3135}
3136
3137static void *t_start(struct seq_file *m, loff_t *pos)
3138{
Li Zefanf129e962009-06-24 09:53:44 +08003139 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003140 loff_t l = 0;
3141
3142 mutex_lock(&trace_types_lock);
Li Zefanf129e962009-06-24 09:53:44 +08003143 for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003144 ;
3145
3146 return t;
3147}
3148
3149static void t_stop(struct seq_file *m, void *p)
3150{
3151 mutex_unlock(&trace_types_lock);
3152}
3153
3154static int t_show(struct seq_file *m, void *v)
3155{
3156 struct tracer *t = v;
3157
3158 if (!t)
3159 return 0;
3160
3161 seq_printf(m, "%s", t->name);
3162 if (t->next)
3163 seq_putc(m, ' ');
3164 else
3165 seq_putc(m, '\n');
3166
3167 return 0;
3168}
3169
James Morris88e9d342009-09-22 16:43:43 -07003170static const struct seq_operations show_traces_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003171 .start = t_start,
3172 .next = t_next,
3173 .stop = t_stop,
3174 .show = t_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003175};
3176
3177static int show_traces_open(struct inode *inode, struct file *file)
3178{
Steven Rostedt60a11772008-05-12 21:20:44 +02003179 if (tracing_disabled)
3180 return -ENODEV;
3181
Li Zefanf129e962009-06-24 09:53:44 +08003182 return seq_open(file, &show_traces_seq_ops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003183}
3184
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003185static ssize_t
3186tracing_write_stub(struct file *filp, const char __user *ubuf,
3187 size_t count, loff_t *ppos)
3188{
3189 return count;
3190}
3191
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003192loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08003193{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003194 int ret;
3195
Slava Pestov364829b2010-11-24 15:13:16 -08003196 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003197 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08003198 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003199 file->f_pos = ret = 0;
3200
3201 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08003202}
3203
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003204static const struct file_operations tracing_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003205 .open = tracing_open,
3206 .read = seq_read,
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003207 .write = tracing_write_stub,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003208 .llseek = tracing_lseek,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003209 .release = tracing_release,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003210};
3211
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003212static const struct file_operations show_traces_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003213 .open = show_traces_open,
3214 .read = seq_read,
3215 .release = seq_release,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003216 .llseek = seq_lseek,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003217};
3218
Ingo Molnar36dfe922008-05-12 21:20:52 +02003219/*
Ingo Molnar36dfe922008-05-12 21:20:52 +02003220 * The tracer itself will not take this lock, but still we want
3221 * to provide a consistent cpumask to user-space:
3222 */
3223static DEFINE_MUTEX(tracing_cpumask_update_lock);
3224
3225/*
3226 * Temporary storage for the character representation of the
3227 * CPU bitmask (and one more byte for the newline):
3228 */
3229static char mask_str[NR_CPUS + 1];
3230
Ingo Molnarc7078de2008-05-12 21:20:52 +02003231static ssize_t
3232tracing_cpumask_read(struct file *filp, char __user *ubuf,
3233 size_t count, loff_t *ppos)
3234{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003235 struct trace_array *tr = file_inode(filp)->i_private;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003236 int len;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003237
3238 mutex_lock(&tracing_cpumask_update_lock);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003239
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003240 len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003241 if (count - len < 2) {
3242 count = -EINVAL;
3243 goto out_err;
3244 }
3245 len += sprintf(mask_str + len, "\n");
3246 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3247
3248out_err:
Ingo Molnarc7078de2008-05-12 21:20:52 +02003249 mutex_unlock(&tracing_cpumask_update_lock);
3250
3251 return count;
3252}
3253
3254static ssize_t
3255tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3256 size_t count, loff_t *ppos)
3257{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003258 struct trace_array *tr = file_inode(filp)->i_private;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303259 cpumask_var_t tracing_cpumask_new;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003260 int err, cpu;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303261
3262 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3263 return -ENOMEM;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003264
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303265 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003266 if (err)
3267 goto err_unlock;
3268
Li Zefan215368e2009-06-15 10:56:42 +08003269 mutex_lock(&tracing_cpumask_update_lock);
3270
Steven Rostedta5e25882008-12-02 15:34:05 -05003271 local_irq_disable();
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01003272 arch_spin_lock(&ftrace_max_lock);
Steven Rostedtab464282008-05-12 21:21:00 +02003273 for_each_tracing_cpu(cpu) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02003274 /*
3275 * Increase/decrease the disabled counter if we are
3276 * about to flip a bit in the cpumask:
3277 */
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003278 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303279 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003280 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3281 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003282 }
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003283 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303284 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003285 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3286 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003287 }
3288 }
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01003289 arch_spin_unlock(&ftrace_max_lock);
Steven Rostedta5e25882008-12-02 15:34:05 -05003290 local_irq_enable();
Ingo Molnar36dfe922008-05-12 21:20:52 +02003291
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003292 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003293
Ingo Molnarc7078de2008-05-12 21:20:52 +02003294 mutex_unlock(&tracing_cpumask_update_lock);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303295 free_cpumask_var(tracing_cpumask_new);
Ingo Molnarc7078de2008-05-12 21:20:52 +02003296
Ingo Molnarc7078de2008-05-12 21:20:52 +02003297 return count;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003298
3299err_unlock:
Li Zefan215368e2009-06-15 10:56:42 +08003300 free_cpumask_var(tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003301
3302 return err;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003303}
3304
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003305static const struct file_operations tracing_cpumask_fops = {
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003306 .open = tracing_open_generic_tr,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003307 .read = tracing_cpumask_read,
3308 .write = tracing_cpumask_write,
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003309 .release = tracing_release_generic_tr,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003310 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003311};
3312
Li Zefanfdb372e2009-12-08 11:15:59 +08003313static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003314{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003315 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003316 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003317 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003318 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003319
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003320 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003321 tracer_flags = tr->current_trace->flags->val;
3322 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003323
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003324 for (i = 0; trace_options[i]; i++) {
3325 if (trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08003326 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003327 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003328 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003329 }
3330
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003331 for (i = 0; trace_opts[i].name; i++) {
3332 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08003333 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003334 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003335 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003336 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003337 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003338
Li Zefanfdb372e2009-12-08 11:15:59 +08003339 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003340}
3341
Li Zefan8d18eaa2009-12-08 11:17:06 +08003342static int __set_tracer_option(struct tracer *trace,
3343 struct tracer_flags *tracer_flags,
3344 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003345{
Li Zefan8d18eaa2009-12-08 11:17:06 +08003346 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003347
Li Zefan8d18eaa2009-12-08 11:17:06 +08003348 ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003349 if (ret)
3350 return ret;
3351
3352 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08003353 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003354 else
Zhaolei77708412009-08-07 18:53:21 +08003355 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003356 return 0;
3357}
3358
Li Zefan8d18eaa2009-12-08 11:17:06 +08003359/* Try to assign a tracer specific option */
3360static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
3361{
3362 struct tracer_flags *tracer_flags = trace->flags;
3363 struct tracer_opt *opts = NULL;
3364 int i;
3365
3366 for (i = 0; tracer_flags->opts[i].name; i++) {
3367 opts = &tracer_flags->opts[i];
3368
3369 if (strcmp(cmp, opts->name) == 0)
3370 return __set_tracer_option(trace, trace->flags,
3371 opts, neg);
3372 }
3373
3374 return -EINVAL;
3375}
3376
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003377/* Some tracers require overwrite to stay enabled */
3378int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3379{
3380 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3381 return -1;
3382
3383 return 0;
3384}
3385
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003386int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003387{
3388 /* do nothing if flag is already set */
3389 if (!!(trace_flags & mask) == !!enabled)
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003390 return 0;
3391
3392 /* Give the tracer a chance to approve the change */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003393 if (tr->current_trace->flag_changed)
3394 if (tr->current_trace->flag_changed(tr->current_trace, mask, !!enabled))
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003395 return -EINVAL;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003396
3397 if (enabled)
3398 trace_flags |= mask;
3399 else
3400 trace_flags &= ~mask;
Li Zefane870e9a2010-07-02 11:07:32 +08003401
3402 if (mask == TRACE_ITER_RECORD_CMD)
3403 trace_event_enable_cmd_record(enabled);
David Sharp750912f2010-12-08 13:46:47 -08003404
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003405 if (mask == TRACE_ITER_OVERWRITE) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003406 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003407#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003408 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003409#endif
3410 }
Steven Rostedt81698832012-10-11 10:15:05 -04003411
3412 if (mask == TRACE_ITER_PRINTK)
3413 trace_printk_start_stop_comm(enabled);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003414
3415 return 0;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003416}
3417
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003418static int trace_set_options(struct trace_array *tr, char *option)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003419{
Li Zefan8d18eaa2009-12-08 11:17:06 +08003420 char *cmp;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003421 int neg = 0;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003422 int ret = -ENODEV;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003423 int i;
3424
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003425 cmp = strstrip(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003426
Li Zefan8d18eaa2009-12-08 11:17:06 +08003427 if (strncmp(cmp, "no", 2) == 0) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003428 neg = 1;
3429 cmp += 2;
3430 }
3431
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003432 mutex_lock(&trace_types_lock);
3433
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003434 for (i = 0; trace_options[i]; i++) {
Li Zefan8d18eaa2009-12-08 11:17:06 +08003435 if (strcmp(cmp, trace_options[i]) == 0) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003436 ret = set_tracer_flag(tr, 1 << i, !neg);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003437 break;
3438 }
3439 }
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003440
3441 /* If no option could be set, test the specific tracer options */
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003442 if (!trace_options[i])
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003443 ret = set_tracer_option(tr->current_trace, cmp, neg);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003444
3445 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003446
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003447 return ret;
3448}
3449
3450static ssize_t
3451tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3452 size_t cnt, loff_t *ppos)
3453{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003454 struct seq_file *m = filp->private_data;
3455 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003456 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003457 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003458
3459 if (cnt >= sizeof(buf))
3460 return -EINVAL;
3461
3462 if (copy_from_user(&buf, ubuf, cnt))
3463 return -EFAULT;
3464
Steven Rostedta8dd2172013-01-09 20:54:17 -05003465 buf[cnt] = 0;
3466
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003467 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003468 if (ret < 0)
3469 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003470
Jiri Olsacf8517c2009-10-23 19:36:16 -04003471 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003472
3473 return cnt;
3474}
3475
Li Zefanfdb372e2009-12-08 11:15:59 +08003476static int tracing_trace_options_open(struct inode *inode, struct file *file)
3477{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003478 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003479 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003480
Li Zefanfdb372e2009-12-08 11:15:59 +08003481 if (tracing_disabled)
3482 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003483
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003484 if (trace_array_get(tr) < 0)
3485 return -ENODEV;
3486
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003487 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3488 if (ret < 0)
3489 trace_array_put(tr);
3490
3491 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08003492}
3493
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003494static const struct file_operations tracing_iter_fops = {
Li Zefanfdb372e2009-12-08 11:15:59 +08003495 .open = tracing_trace_options_open,
3496 .read = seq_read,
3497 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003498 .release = tracing_single_release_tr,
Steven Rostedtee6bce52008-11-12 17:52:37 -05003499 .write = tracing_trace_options_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003500};
3501
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003502static const char readme_msg[] =
3503 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003504 "# echo 0 > tracing_on : quick way to disable tracing\n"
3505 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3506 " Important files:\n"
3507 " trace\t\t\t- The static contents of the buffer\n"
3508 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3509 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3510 " current_tracer\t- function and latency tracers\n"
3511 " available_tracers\t- list of configured tracers for current_tracer\n"
3512 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3513 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3514 " trace_clock\t\t-change the clock used to order events\n"
3515 " local: Per cpu clock but may not be synced across CPUs\n"
3516 " global: Synced across CPUs but slows tracing down.\n"
3517 " counter: Not a clock, but just an increment\n"
3518 " uptime: Jiffy counter from time of boot\n"
3519 " perf: Same clock that perf events use\n"
3520#ifdef CONFIG_X86_64
3521 " x86-tsc: TSC cycle counter\n"
3522#endif
3523 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3524 " tracing_cpumask\t- Limit which CPUs to trace\n"
3525 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3526 "\t\t\t Remove sub-buffer with rmdir\n"
3527 " trace_options\t\t- Set format or modify how tracing happens\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003528 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
3529 "\t\t\t option name\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003530#ifdef CONFIG_DYNAMIC_FTRACE
3531 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003532 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3533 "\t\t\t functions\n"
3534 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3535 "\t modules: Can select a group via module\n"
3536 "\t Format: :mod:<module-name>\n"
3537 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3538 "\t triggers: a command to perform when function is hit\n"
3539 "\t Format: <function>:<trigger>[:count]\n"
3540 "\t trigger: traceon, traceoff\n"
3541 "\t\t enable_event:<system>:<event>\n"
3542 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003543#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003544 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003545#endif
3546#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003547 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003548#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003549 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3550 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3551 "\t The first one will disable tracing every time do_fault is hit\n"
3552 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3553 "\t The first time do trap is hit and it disables tracing, the\n"
3554 "\t counter will decrement to 2. If tracing is already disabled,\n"
3555 "\t the counter will not decrement. It only decrements when the\n"
3556 "\t trigger did work\n"
3557 "\t To remove trigger without count:\n"
3558 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3559 "\t To remove trigger with a count:\n"
3560 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003561 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003562 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3563 "\t modules: Can select a group via module command :mod:\n"
3564 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003565#endif /* CONFIG_DYNAMIC_FTRACE */
3566#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003567 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3568 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003569#endif
3570#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3571 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3572 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3573#endif
3574#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003575 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3576 "\t\t\t snapshot buffer. Read the contents for more\n"
3577 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003578#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003579#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003580 " stack_trace\t\t- Shows the max stack trace when active\n"
3581 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003582 "\t\t\t Write into this file to reset the max size (trigger a\n"
3583 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003584#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003585 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3586 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003587#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003588#endif /* CONFIG_STACK_TRACER */
Tom Zanussi26f25562014-01-17 15:11:44 -06003589 " events/\t\t- Directory containing all trace event subsystems:\n"
3590 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3591 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003592 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3593 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003594 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003595 " events/<system>/<event>/\t- Directory containing control files for\n"
3596 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003597 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3598 " filter\t\t- If set, only events passing filter are traced\n"
3599 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003600 "\t Format: <trigger>[:count][if <filter>]\n"
3601 "\t trigger: traceon, traceoff\n"
3602 "\t enable_event:<system>:<event>\n"
3603 "\t disable_event:<system>:<event>\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003604#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003605 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003606#endif
3607#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003608 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003609#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003610 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3611 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3612 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3613 "\t events/block/block_unplug/trigger\n"
3614 "\t The first disables tracing every time block_unplug is hit.\n"
3615 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3616 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3617 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3618 "\t Like function triggers, the counter is only decremented if it\n"
3619 "\t enabled or disabled tracing.\n"
3620 "\t To remove a trigger without a count:\n"
3621 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3622 "\t To remove a trigger with a count:\n"
3623 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3624 "\t Filters can be ignored when removing a trigger.\n"
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003625;
3626
3627static ssize_t
3628tracing_readme_read(struct file *filp, char __user *ubuf,
3629 size_t cnt, loff_t *ppos)
3630{
3631 return simple_read_from_buffer(ubuf, cnt, ppos,
3632 readme_msg, strlen(readme_msg));
3633}
3634
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003635static const struct file_operations tracing_readme_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003636 .open = tracing_open_generic,
3637 .read = tracing_readme_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003638 .llseek = generic_file_llseek,
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003639};
3640
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003641static ssize_t
Avadh Patel69abe6a2009-04-10 16:04:48 -04003642tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
3643 size_t cnt, loff_t *ppos)
3644{
3645 char *buf_comm;
3646 char *file_buf;
3647 char *buf;
3648 int len = 0;
3649 int pid;
3650 int i;
3651
3652 file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
3653 if (!file_buf)
3654 return -ENOMEM;
3655
3656 buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
3657 if (!buf_comm) {
3658 kfree(file_buf);
3659 return -ENOMEM;
3660 }
3661
3662 buf = file_buf;
3663
3664 for (i = 0; i < SAVED_CMDLINES; i++) {
3665 int r;
3666
3667 pid = map_cmdline_to_pid[i];
3668 if (pid == -1 || pid == NO_CMDLINE_MAP)
3669 continue;
3670
3671 trace_find_cmdline(pid, buf_comm);
3672 r = sprintf(buf, "%d %s\n", pid, buf_comm);
3673 buf += r;
3674 len += r;
3675 }
3676
3677 len = simple_read_from_buffer(ubuf, cnt, ppos,
3678 file_buf, len);
3679
3680 kfree(file_buf);
3681 kfree(buf_comm);
3682
3683 return len;
3684}
3685
3686static const struct file_operations tracing_saved_cmdlines_fops = {
3687 .open = tracing_open_generic,
3688 .read = tracing_saved_cmdlines_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003689 .llseek = generic_file_llseek,
Avadh Patel69abe6a2009-04-10 16:04:48 -04003690};
3691
3692static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003693tracing_set_trace_read(struct file *filp, char __user *ubuf,
3694 size_t cnt, loff_t *ppos)
3695{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003696 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08003697 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003698 int r;
3699
3700 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003701 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003702 mutex_unlock(&trace_types_lock);
3703
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003704 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003705}
3706
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02003707int tracer_init(struct tracer *t, struct trace_array *tr)
3708{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003709 tracing_reset_online_cpus(&tr->trace_buffer);
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02003710 return t->init(tr);
3711}
3712
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003713static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003714{
3715 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05003716
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003717 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003718 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003719}
3720
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003721#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookad60da502012-10-17 11:56:16 +09003722/* resize @tr's buffer to the size of @size_tr's entries */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003723static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
3724 struct trace_buffer *size_buf, int cpu_id)
Hiraku Toyookad60da502012-10-17 11:56:16 +09003725{
3726 int cpu, ret = 0;
3727
3728 if (cpu_id == RING_BUFFER_ALL_CPUS) {
3729 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003730 ret = ring_buffer_resize(trace_buf->buffer,
3731 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
Hiraku Toyookad60da502012-10-17 11:56:16 +09003732 if (ret < 0)
3733 break;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003734 per_cpu_ptr(trace_buf->data, cpu)->entries =
3735 per_cpu_ptr(size_buf->data, cpu)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09003736 }
3737 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003738 ret = ring_buffer_resize(trace_buf->buffer,
3739 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
Hiraku Toyookad60da502012-10-17 11:56:16 +09003740 if (ret == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003741 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
3742 per_cpu_ptr(size_buf->data, cpu_id)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09003743 }
3744
3745 return ret;
3746}
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003747#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09003748
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003749static int __tracing_resize_ring_buffer(struct trace_array *tr,
3750 unsigned long size, int cpu)
Steven Rostedt73c51622009-03-11 13:42:01 -04003751{
3752 int ret;
3753
3754 /*
3755 * If kernel or user changes the size of the ring buffer
Steven Rostedta123c522009-03-12 11:21:08 -04003756 * we use the size that was given, and we can forget about
3757 * expanding it later.
Steven Rostedt73c51622009-03-11 13:42:01 -04003758 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05003759 ring_buffer_expanded = true;
Steven Rostedt73c51622009-03-11 13:42:01 -04003760
Steven Rostedtb382ede62012-10-10 21:44:34 -04003761 /* May be called before buffers are initialized */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003762 if (!tr->trace_buffer.buffer)
Steven Rostedtb382ede62012-10-10 21:44:34 -04003763 return 0;
3764
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003765 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003766 if (ret < 0)
3767 return ret;
3768
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003769#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003770 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
3771 !tr->current_trace->use_max_tr)
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003772 goto out;
3773
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003774 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003775 if (ret < 0) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003776 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
3777 &tr->trace_buffer, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003778 if (r < 0) {
Steven Rostedta123c522009-03-12 11:21:08 -04003779 /*
3780 * AARGH! We are left with different
3781 * size max buffer!!!!
3782 * The max buffer is our "snapshot" buffer.
3783 * When a tracer needs a snapshot (one of the
3784 * latency tracers), it swaps the max buffer
3785 * with the saved snap shot. We succeeded to
3786 * update the size of the main buffer, but failed to
3787 * update the size of the max buffer. But when we tried
3788 * to reset the main buffer to the original size, we
3789 * failed there too. This is very unlikely to
3790 * happen, but if it does, warn and kill all
3791 * tracing.
3792 */
Steven Rostedt73c51622009-03-11 13:42:01 -04003793 WARN_ON(1);
3794 tracing_disabled = 1;
3795 }
3796 return ret;
3797 }
3798
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003799 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003800 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003801 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003802 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003803
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003804 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003805#endif /* CONFIG_TRACER_MAX_TRACE */
3806
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003807 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003808 set_buffer_entries(&tr->trace_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003809 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003810 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04003811
3812 return ret;
3813}
3814
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003815static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
3816 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003817{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07003818 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003819
3820 mutex_lock(&trace_types_lock);
3821
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003822 if (cpu_id != RING_BUFFER_ALL_CPUS) {
3823 /* make sure, this cpu is enabled in the mask */
3824 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
3825 ret = -EINVAL;
3826 goto out;
3827 }
3828 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003829
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003830 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003831 if (ret < 0)
3832 ret = -ENOMEM;
3833
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003834out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003835 mutex_unlock(&trace_types_lock);
3836
3837 return ret;
3838}
3839
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003840
Steven Rostedt1852fcc2009-03-11 14:33:00 -04003841/**
3842 * tracing_update_buffers - used by tracing facility to expand ring buffers
3843 *
3844 * To save on memory when the tracing is never used on a system with it
3845 * configured in. The ring buffers are set to a minimum size. But once
3846 * a user starts to use the tracing facility, then they need to grow
3847 * to their default size.
3848 *
3849 * This function is to be called when a tracer is about to be used.
3850 */
3851int tracing_update_buffers(void)
3852{
3853 int ret = 0;
3854
Steven Rostedt1027fcb2009-03-12 11:33:20 -04003855 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04003856 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003857 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003858 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04003859 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04003860
3861 return ret;
3862}
3863
Steven Rostedt577b7852009-02-26 23:43:05 -05003864struct trace_option_dentry;
3865
3866static struct trace_option_dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003867create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05003868
3869static void
3870destroy_trace_option_files(struct trace_option_dentry *topts);
3871
Steven Rostedtb2821ae2009-02-02 21:38:32 -05003872static int tracing_set_tracer(const char *buf)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003873{
Steven Rostedt577b7852009-02-26 23:43:05 -05003874 static struct trace_option_dentry *topts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003875 struct trace_array *tr = &global_trace;
3876 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003877#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05003878 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003879#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01003880 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003881
Steven Rostedt1027fcb2009-03-12 11:33:20 -04003882 mutex_lock(&trace_types_lock);
3883
Steven Rostedt73c51622009-03-11 13:42:01 -04003884 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003885 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003886 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04003887 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01003888 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04003889 ret = 0;
3890 }
3891
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003892 for (t = trace_types; t; t = t->next) {
3893 if (strcmp(t->name, buf) == 0)
3894 break;
3895 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02003896 if (!t) {
3897 ret = -EINVAL;
3898 goto out;
3899 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003900 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003901 goto out;
3902
Steven Rostedt9f029e82008-11-12 15:24:24 -05003903 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003904
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003905 tr->current_trace->enabled = false;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003906
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003907 if (tr->current_trace->reset)
3908 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05003909
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003910 /* Current trace needs to be nop_trace before synchronize_sched */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003911 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05003912
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05003913#ifdef CONFIG_TRACER_MAX_TRACE
3914 had_max_tr = tr->allocated_snapshot;
Steven Rostedt34600f02013-01-22 13:35:11 -05003915
3916 if (had_max_tr && !t->use_max_tr) {
3917 /*
3918 * We need to make sure that the update_max_tr sees that
3919 * current_trace changed to nop_trace to keep it from
3920 * swapping the buffers after we resize it.
3921 * The update_max_tr is called from interrupts disabled
3922 * so a synchronized_sched() is sufficient.
3923 */
3924 synchronize_sched();
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04003925 free_snapshot(tr);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003926 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003927#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05003928 destroy_trace_option_files(topts);
3929
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003930 topts = create_trace_option_files(tr, t);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003931
3932#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05003933 if (t->use_max_tr && !had_max_tr) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04003934 ret = alloc_snapshot(tr);
Hiraku Toyookad60da502012-10-17 11:56:16 +09003935 if (ret < 0)
3936 goto out;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003937 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003938#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05003939
Frederic Weisbecker1c800252008-11-16 05:57:26 +01003940 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02003941 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01003942 if (ret)
3943 goto out;
3944 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003945
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003946 tr->current_trace = t;
3947 tr->current_trace->enabled = true;
Steven Rostedt9f029e82008-11-12 15:24:24 -05003948 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003949 out:
3950 mutex_unlock(&trace_types_lock);
3951
Peter Zijlstrad9e54072008-11-01 19:57:37 +01003952 return ret;
3953}
3954
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003955static ssize_t
3956tracing_set_trace_write(struct file *filp, const char __user *ubuf,
3957 size_t cnt, loff_t *ppos)
3958{
Li Zefanee6c2c12009-09-18 14:06:47 +08003959 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003960 int i;
3961 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01003962 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003963
Steven Rostedt60063a62008-10-28 10:44:24 -04003964 ret = cnt;
3965
Li Zefanee6c2c12009-09-18 14:06:47 +08003966 if (cnt > MAX_TRACER_SIZE)
3967 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003968
3969 if (copy_from_user(&buf, ubuf, cnt))
3970 return -EFAULT;
3971
3972 buf[cnt] = 0;
3973
3974 /* strip ending whitespace. */
3975 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
3976 buf[i] = 0;
3977
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01003978 err = tracing_set_tracer(buf);
3979 if (err)
3980 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003981
Jiri Olsacf8517c2009-10-23 19:36:16 -04003982 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003983
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02003984 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003985}
3986
3987static ssize_t
3988tracing_max_lat_read(struct file *filp, char __user *ubuf,
3989 size_t cnt, loff_t *ppos)
3990{
3991 unsigned long *ptr = filp->private_data;
3992 char buf[64];
3993 int r;
3994
Steven Rostedtcffae432008-05-12 21:21:00 +02003995 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003996 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02003997 if (r > sizeof(buf))
3998 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003999 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004000}
4001
4002static ssize_t
4003tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4004 size_t cnt, loff_t *ppos)
4005{
Hannes Eder5e398412009-02-10 19:44:34 +01004006 unsigned long *ptr = filp->private_data;
Hannes Eder5e398412009-02-10 19:44:34 +01004007 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004008 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004009
Peter Huewe22fe9b52011-06-07 21:58:27 +02004010 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4011 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004012 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004013
4014 *ptr = val * 1000;
4015
4016 return cnt;
4017}
4018
Steven Rostedtb3806b42008-05-12 21:20:46 +02004019static int tracing_open_pipe(struct inode *inode, struct file *filp)
4020{
Oleg Nesterov15544202013-07-23 17:25:57 +02004021 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004022 struct trace_iterator *iter;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004023 int ret = 0;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004024
4025 if (tracing_disabled)
4026 return -ENODEV;
4027
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004028 if (trace_array_get(tr) < 0)
4029 return -ENODEV;
4030
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004031 mutex_lock(&trace_types_lock);
4032
Steven Rostedtb3806b42008-05-12 21:20:46 +02004033 /* create a buffer to store the information to pass to userspace */
4034 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004035 if (!iter) {
4036 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004037 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004038 goto out;
4039 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004040
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004041 /*
4042 * We make a copy of the current tracer to avoid concurrent
4043 * changes on it while we are reading.
4044 */
4045 iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
4046 if (!iter->trace) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004047 ret = -ENOMEM;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004048 goto fail;
4049 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004050 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004051
4052 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4053 ret = -ENOMEM;
4054 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10304055 }
4056
Steven Rostedta3097202008-11-07 22:36:02 -05004057 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10304058 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05004059
Steven Rostedt112f38a72009-06-01 15:16:05 -04004060 if (trace_flags & TRACE_ITER_LATENCY_FMT)
4061 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4062
David Sharp8be07092012-11-13 12:18:22 -08004063 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09004064 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08004065 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4066
Oleg Nesterov15544202013-07-23 17:25:57 +02004067 iter->tr = tr;
4068 iter->trace_buffer = &tr->trace_buffer;
4069 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004070 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004071 filp->private_data = iter;
4072
Steven Rostedt107bad82008-05-12 21:21:01 +02004073 if (iter->trace->pipe_open)
4074 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02004075
Arnd Bergmannb4447862010-07-07 23:40:11 +02004076 nonseekable_open(inode, filp);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004077out:
4078 mutex_unlock(&trace_types_lock);
4079 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004080
4081fail:
4082 kfree(iter->trace);
4083 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004084 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004085 mutex_unlock(&trace_types_lock);
4086 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004087}
4088
4089static int tracing_release_pipe(struct inode *inode, struct file *file)
4090{
4091 struct trace_iterator *iter = file->private_data;
Oleg Nesterov15544202013-07-23 17:25:57 +02004092 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004093
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004094 mutex_lock(&trace_types_lock);
4095
Steven Rostedt29bf4a52009-12-09 12:37:43 -05004096 if (iter->trace->pipe_close)
Steven Rostedtc521efd2009-12-07 09:06:24 -05004097 iter->trace->pipe_close(iter);
4098
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004099 mutex_unlock(&trace_types_lock);
4100
Rusty Russell44623442009-01-01 10:12:23 +10304101 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004102 mutex_destroy(&iter->mutex);
4103 kfree(iter->trace);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004104 kfree(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004105
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004106 trace_array_put(tr);
4107
Steven Rostedtb3806b42008-05-12 21:20:46 +02004108 return 0;
4109}
4110
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004111static unsigned int
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004112trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004113{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004114 /* Iterators are static, they should be filled or empty */
4115 if (trace_buffer_iter(iter, iter->cpu_file))
4116 return POLLIN | POLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004117
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004118 if (trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004119 /*
4120 * Always select as readable when in blocking mode
4121 */
4122 return POLLIN | POLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004123 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004124 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004125 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004126}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004127
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004128static unsigned int
4129tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4130{
4131 struct trace_iterator *iter = filp->private_data;
4132
4133 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004134}
4135
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01004136/*
4137 * This is a make-shift waitqueue.
4138 * A tracer might use this callback on some rare cases:
4139 *
4140 * 1) the current tracer might hold the runqueue lock when it wakes up
4141 * a reader, hence a deadlock (sched, function, and function graph tracers)
4142 * 2) the function tracers, trace all functions, we don't want
4143 * the overhead of calling wake_up and friends
4144 * (and tracing them too)
4145 *
4146 * Anyway, this is really very primitive wakeup.
4147 */
4148void poll_wait_pipe(struct trace_iterator *iter)
4149{
4150 set_current_state(TASK_INTERRUPTIBLE);
4151 /* sleep for 100 msecs, and try again. */
4152 schedule_timeout(HZ / 10);
4153}
4154
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004155/* Must be called with trace_types_lock mutex held. */
4156static int tracing_wait_pipe(struct file *filp)
4157{
4158 struct trace_iterator *iter = filp->private_data;
4159
4160 while (trace_empty(iter)) {
4161
4162 if ((filp->f_flags & O_NONBLOCK)) {
4163 return -EAGAIN;
4164 }
4165
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004166 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004167
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01004168 iter->trace->wait_pipe(iter);
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004169
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004170 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004171
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01004172 if (signal_pending(current))
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004173 return -EINTR;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004174
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004175 /*
Liu Bo250bfd32013-01-14 10:54:11 +08004176 * We block until we read something and tracing is disabled.
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004177 * We still block if tracing is disabled, but we have never
4178 * read anything. This allows a user to cat this file, and
4179 * then enable tracing. But after we have read something,
4180 * we give an EOF when tracing is again disabled.
4181 *
4182 * iter->pos will be 0 if we haven't read anything.
4183 */
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04004184 if (!tracing_is_on() && iter->pos)
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004185 break;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004186 }
4187
4188 return 1;
4189}
4190
Steven Rostedtb3806b42008-05-12 21:20:46 +02004191/*
4192 * Consumer reader.
4193 */
4194static ssize_t
4195tracing_read_pipe(struct file *filp, char __user *ubuf,
4196 size_t cnt, loff_t *ppos)
4197{
4198 struct trace_iterator *iter = filp->private_data;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004199 struct trace_array *tr = iter->tr;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004200 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004201
4202 /* return any leftover data */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004203 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4204 if (sret != -EBUSY)
4205 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004206
Steven Rostedtf9520752009-03-02 14:04:40 -05004207 trace_seq_init(&iter->seq);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004208
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004209 /* copy the tracer to avoid using a global lock all around */
Steven Rostedt107bad82008-05-12 21:21:01 +02004210 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004211 if (unlikely(iter->trace->name != tr->current_trace->name))
4212 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004213 mutex_unlock(&trace_types_lock);
4214
4215 /*
4216 * Avoid more than one consumer on a single file descriptor
4217 * This is just a matter of traces coherency, the ring buffer itself
4218 * is protected.
4219 */
4220 mutex_lock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02004221 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004222 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4223 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02004224 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02004225 }
4226
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004227waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004228 sret = tracing_wait_pipe(filp);
4229 if (sret <= 0)
4230 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004231
4232 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004233 if (trace_empty(iter)) {
4234 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02004235 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004236 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004237
4238 if (cnt >= PAGE_SIZE)
4239 cnt = PAGE_SIZE - 1;
4240
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004241 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004242 memset(&iter->seq, 0,
4243 sizeof(struct trace_iterator) -
4244 offsetof(struct trace_iterator, seq));
Andrew Vagined5467d2013-08-02 21:16:43 +04004245 cpumask_clear(iter->started);
Steven Rostedt4823ed72008-05-12 21:21:01 +02004246 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004247
Lai Jiangshan4f535962009-05-18 19:35:34 +08004248 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004249 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05004250 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004251 enum print_line_t ret;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004252 int len = iter->seq.len;
4253
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004254 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004255 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02004256 /* don't print partial lines */
4257 iter->seq.len = len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004258 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004259 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01004260 if (ret != TRACE_TYPE_NO_CONSUME)
4261 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004262
4263 if (iter->seq.len >= cnt)
4264 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01004265
4266 /*
4267 * Setting the full flag means we reached the trace_seq buffer
4268 * size and we should leave by partial output condition above.
4269 * One of the trace_seq_* functions is not used properly.
4270 */
4271 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4272 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004273 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004274 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004275 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02004276
Steven Rostedtb3806b42008-05-12 21:20:46 +02004277 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004278 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4279 if (iter->seq.readpos >= iter->seq.len)
Steven Rostedtf9520752009-03-02 14:04:40 -05004280 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004281
4282 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004283 * If there was nothing to send to user, in spite of consuming trace
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004284 * entries, go back to wait for more entries.
4285 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004286 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004287 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004288
Steven Rostedt107bad82008-05-12 21:21:01 +02004289out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004290 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02004291
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004292 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004293}
4294
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004295static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4296 unsigned int idx)
4297{
4298 __free_page(spd->pages[idx]);
4299}
4300
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08004301static const struct pipe_buf_operations tracing_pipe_buf_ops = {
Steven Rostedt34cd4992009-02-09 12:06:29 -05004302 .can_merge = 0,
4303 .map = generic_pipe_buf_map,
4304 .unmap = generic_pipe_buf_unmap,
4305 .confirm = generic_pipe_buf_confirm,
Al Viro92fdd982014-01-17 07:53:39 -05004306 .release = generic_pipe_buf_release,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004307 .steal = generic_pipe_buf_steal,
4308 .get = generic_pipe_buf_get,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004309};
4310
Steven Rostedt34cd4992009-02-09 12:06:29 -05004311static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01004312tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004313{
4314 size_t count;
4315 int ret;
4316
4317 /* Seq buffer is page-sized, exactly what we need. */
4318 for (;;) {
4319 count = iter->seq.len;
4320 ret = print_trace_line(iter);
4321 count = iter->seq.len - count;
4322 if (rem < count) {
4323 rem = 0;
4324 iter->seq.len -= count;
4325 break;
4326 }
4327 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4328 iter->seq.len -= count;
4329 break;
4330 }
4331
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08004332 if (ret != TRACE_TYPE_NO_CONSUME)
4333 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05004334 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05004335 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05004336 rem = 0;
4337 iter->ent = NULL;
4338 break;
4339 }
4340 }
4341
4342 return rem;
4343}
4344
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004345static ssize_t tracing_splice_read_pipe(struct file *filp,
4346 loff_t *ppos,
4347 struct pipe_inode_info *pipe,
4348 size_t len,
4349 unsigned int flags)
4350{
Jens Axboe35f3d142010-05-20 10:43:18 +02004351 struct page *pages_def[PIPE_DEF_BUFFERS];
4352 struct partial_page partial_def[PIPE_DEF_BUFFERS];
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004353 struct trace_iterator *iter = filp->private_data;
4354 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02004355 .pages = pages_def,
4356 .partial = partial_def,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004357 .nr_pages = 0, /* This gets updated below. */
Eric Dumazet047fe362012-06-12 15:24:40 +02004358 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004359 .flags = flags,
4360 .ops = &tracing_pipe_buf_ops,
4361 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004362 };
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004363 struct trace_array *tr = iter->tr;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004364 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004365 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004366 unsigned int i;
4367
Jens Axboe35f3d142010-05-20 10:43:18 +02004368 if (splice_grow_spd(pipe, &spd))
4369 return -ENOMEM;
4370
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004371 /* copy the tracer to avoid using a global lock all around */
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004372 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004373 if (unlikely(iter->trace->name != tr->current_trace->name))
4374 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004375 mutex_unlock(&trace_types_lock);
4376
4377 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004378
4379 if (iter->trace->splice_read) {
4380 ret = iter->trace->splice_read(iter, filp,
4381 ppos, pipe, len, flags);
4382 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004383 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004384 }
4385
4386 ret = tracing_wait_pipe(filp);
4387 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004388 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004389
Jason Wessel955b61e2010-08-05 09:22:23 -05004390 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004391 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004392 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004393 }
4394
Lai Jiangshan4f535962009-05-18 19:35:34 +08004395 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004396 trace_access_lock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004397
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004398 /* Fill as many pages as possible. */
Jens Axboe35f3d142010-05-20 10:43:18 +02004399 for (i = 0, rem = len; i < pipe->buffers && rem; i++) {
4400 spd.pages[i] = alloc_page(GFP_KERNEL);
4401 if (!spd.pages[i])
Steven Rostedt34cd4992009-02-09 12:06:29 -05004402 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004403
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01004404 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004405
4406 /* Copy the data into the page, so we can start over. */
4407 ret = trace_seq_to_buffer(&iter->seq,
Jens Axboe35f3d142010-05-20 10:43:18 +02004408 page_address(spd.pages[i]),
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004409 iter->seq.len);
4410 if (ret < 0) {
Jens Axboe35f3d142010-05-20 10:43:18 +02004411 __free_page(spd.pages[i]);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004412 break;
4413 }
Jens Axboe35f3d142010-05-20 10:43:18 +02004414 spd.partial[i].offset = 0;
4415 spd.partial[i].len = iter->seq.len;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004416
Steven Rostedtf9520752009-03-02 14:04:40 -05004417 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004418 }
4419
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004420 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004421 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004422 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004423
4424 spd.nr_pages = i;
4425
Jens Axboe35f3d142010-05-20 10:43:18 +02004426 ret = splice_to_pipe(pipe, &spd);
4427out:
Eric Dumazet047fe362012-06-12 15:24:40 +02004428 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02004429 return ret;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004430
Steven Rostedt34cd4992009-02-09 12:06:29 -05004431out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004432 mutex_unlock(&iter->mutex);
Jens Axboe35f3d142010-05-20 10:43:18 +02004433 goto out;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004434}
4435
Steven Rostedta98a3c32008-05-12 21:20:59 +02004436static ssize_t
4437tracing_entries_read(struct file *filp, char __user *ubuf,
4438 size_t cnt, loff_t *ppos)
4439{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004440 struct inode *inode = file_inode(filp);
4441 struct trace_array *tr = inode->i_private;
4442 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004443 char buf[64];
4444 int r = 0;
4445 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004446
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004447 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004448
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004449 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004450 int cpu, buf_size_same;
4451 unsigned long size;
4452
4453 size = 0;
4454 buf_size_same = 1;
4455 /* check if all cpu sizes are same */
4456 for_each_tracing_cpu(cpu) {
4457 /* fill in the size from first enabled cpu */
4458 if (size == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004459 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4460 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004461 buf_size_same = 0;
4462 break;
4463 }
4464 }
4465
4466 if (buf_size_same) {
4467 if (!ring_buffer_expanded)
4468 r = sprintf(buf, "%lu (expanded: %lu)\n",
4469 size >> 10,
4470 trace_buf_size >> 10);
4471 else
4472 r = sprintf(buf, "%lu\n", size >> 10);
4473 } else
4474 r = sprintf(buf, "X\n");
4475 } else
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004476 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004477
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004478 mutex_unlock(&trace_types_lock);
4479
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004480 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4481 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004482}
4483
4484static ssize_t
4485tracing_entries_write(struct file *filp, const char __user *ubuf,
4486 size_t cnt, loff_t *ppos)
4487{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004488 struct inode *inode = file_inode(filp);
4489 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004490 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004491 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004492
Peter Huewe22fe9b52011-06-07 21:58:27 +02004493 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4494 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004495 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004496
4497 /* must have at least 1 entry */
4498 if (!val)
4499 return -EINVAL;
4500
Steven Rostedt1696b2b2008-11-13 00:09:35 -05004501 /* value is in KB */
4502 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004503 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004504 if (ret < 0)
4505 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004506
Jiri Olsacf8517c2009-10-23 19:36:16 -04004507 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004508
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004509 return cnt;
4510}
Steven Rostedtbf5e6512008-11-10 21:46:00 -05004511
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004512static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004513tracing_total_entries_read(struct file *filp, char __user *ubuf,
4514 size_t cnt, loff_t *ppos)
4515{
4516 struct trace_array *tr = filp->private_data;
4517 char buf[64];
4518 int r, cpu;
4519 unsigned long size = 0, expanded_size = 0;
4520
4521 mutex_lock(&trace_types_lock);
4522 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004523 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004524 if (!ring_buffer_expanded)
4525 expanded_size += trace_buf_size >> 10;
4526 }
4527 if (ring_buffer_expanded)
4528 r = sprintf(buf, "%lu\n", size);
4529 else
4530 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
4531 mutex_unlock(&trace_types_lock);
4532
4533 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4534}
4535
4536static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004537tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
4538 size_t cnt, loff_t *ppos)
4539{
4540 /*
4541 * There is no need to read what the user has written, this function
4542 * is just to make sure that there is no error when "echo" is used
4543 */
4544
4545 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004546
4547 return cnt;
4548}
4549
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004550static int
4551tracing_free_buffer_release(struct inode *inode, struct file *filp)
4552{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004553 struct trace_array *tr = inode->i_private;
4554
Steven Rostedtcf30cf62011-06-14 22:44:07 -04004555 /* disable tracing ? */
4556 if (trace_flags & TRACE_ITER_STOP_ON_FREE)
Alexander Z Lam711e1242013-08-02 18:36:15 -07004557 tracer_tracing_off(tr);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004558 /* resize the ring buffer to 0 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004559 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004560
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004561 trace_array_put(tr);
4562
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004563 return 0;
4564}
4565
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004566static ssize_t
4567tracing_mark_write(struct file *filp, const char __user *ubuf,
4568 size_t cnt, loff_t *fpos)
4569{
Steven Rostedtd696b582011-09-22 11:50:27 -04004570 unsigned long addr = (unsigned long)ubuf;
Alexander Z Lam2d716192013-07-01 15:31:24 -07004571 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04004572 struct ring_buffer_event *event;
4573 struct ring_buffer *buffer;
4574 struct print_entry *entry;
4575 unsigned long irq_flags;
4576 struct page *pages[2];
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004577 void *map_page[2];
Steven Rostedtd696b582011-09-22 11:50:27 -04004578 int nr_pages = 1;
4579 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04004580 int offset;
4581 int size;
4582 int len;
4583 int ret;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004584 int i;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004585
Steven Rostedtc76f0692008-11-07 22:36:02 -05004586 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004587 return -EINVAL;
4588
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07004589 if (!(trace_flags & TRACE_ITER_MARKERS))
4590 return -EINVAL;
4591
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004592 if (cnt > TRACE_BUF_SIZE)
4593 cnt = TRACE_BUF_SIZE;
4594
Steven Rostedtd696b582011-09-22 11:50:27 -04004595 /*
4596 * Userspace is injecting traces into the kernel trace buffer.
4597 * We want to be as non intrusive as possible.
4598 * To do so, we do not want to allocate any special buffers
4599 * or take any locks, but instead write the userspace data
4600 * straight into the ring buffer.
4601 *
4602 * First we need to pin the userspace buffer into memory,
4603 * which, most likely it is, because it just referenced it.
4604 * But there's no guarantee that it is. By using get_user_pages_fast()
4605 * and kmap_atomic/kunmap_atomic() we can get access to the
4606 * pages directly. We then write the data directly into the
4607 * ring buffer.
4608 */
4609 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004610
Steven Rostedtd696b582011-09-22 11:50:27 -04004611 /* check if we cross pages */
4612 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
4613 nr_pages = 2;
4614
4615 offset = addr & (PAGE_SIZE - 1);
4616 addr &= PAGE_MASK;
4617
4618 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
4619 if (ret < nr_pages) {
4620 while (--ret >= 0)
4621 put_page(pages[ret]);
4622 written = -EFAULT;
4623 goto out;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004624 }
4625
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004626 for (i = 0; i < nr_pages; i++)
4627 map_page[i] = kmap_atomic(pages[i]);
Steven Rostedtd696b582011-09-22 11:50:27 -04004628
4629 local_save_flags(irq_flags);
4630 size = sizeof(*entry) + cnt + 2; /* possible \n added */
Alexander Z Lam2d716192013-07-01 15:31:24 -07004631 buffer = tr->trace_buffer.buffer;
Steven Rostedtd696b582011-09-22 11:50:27 -04004632 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
4633 irq_flags, preempt_count());
4634 if (!event) {
4635 /* Ring buffer disabled, return as if not open for write */
4636 written = -EBADF;
4637 goto out_unlock;
4638 }
4639
4640 entry = ring_buffer_event_data(event);
4641 entry->ip = _THIS_IP_;
4642
4643 if (nr_pages == 2) {
4644 len = PAGE_SIZE - offset;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004645 memcpy(&entry->buf, map_page[0] + offset, len);
4646 memcpy(&entry->buf[len], map_page[1], cnt - len);
Steven Rostedtd696b582011-09-22 11:50:27 -04004647 } else
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004648 memcpy(&entry->buf, map_page[0] + offset, cnt);
Steven Rostedtd696b582011-09-22 11:50:27 -04004649
4650 if (entry->buf[cnt - 1] != '\n') {
4651 entry->buf[cnt] = '\n';
4652 entry->buf[cnt + 1] = '\0';
4653 } else
4654 entry->buf[cnt] = '\0';
4655
Steven Rostedt7ffbd482012-10-11 12:14:25 -04004656 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04004657
4658 written = cnt;
4659
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02004660 *fpos += written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004661
Steven Rostedtd696b582011-09-22 11:50:27 -04004662 out_unlock:
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004663 for (i = 0; i < nr_pages; i++){
4664 kunmap_atomic(map_page[i]);
4665 put_page(pages[i]);
4666 }
Steven Rostedtd696b582011-09-22 11:50:27 -04004667 out:
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02004668 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004669}
4670
Li Zefan13f16d22009-12-08 11:16:11 +08004671static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08004672{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004673 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08004674 int i;
4675
4676 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08004677 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08004678 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004679 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
4680 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08004681 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08004682
Li Zefan13f16d22009-12-08 11:16:11 +08004683 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08004684}
4685
4686static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
4687 size_t cnt, loff_t *fpos)
4688{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004689 struct seq_file *m = filp->private_data;
4690 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08004691 char buf[64];
4692 const char *clockstr;
4693 int i;
4694
4695 if (cnt >= sizeof(buf))
4696 return -EINVAL;
4697
4698 if (copy_from_user(&buf, ubuf, cnt))
4699 return -EFAULT;
4700
4701 buf[cnt] = 0;
4702
4703 clockstr = strstrip(buf);
4704
4705 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
4706 if (strcmp(trace_clocks[i].name, clockstr) == 0)
4707 break;
4708 }
4709 if (i == ARRAY_SIZE(trace_clocks))
4710 return -EINVAL;
4711
Zhaolei5079f322009-08-25 16:12:56 +08004712 mutex_lock(&trace_types_lock);
4713
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004714 tr->clock_id = i;
4715
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004716 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08004717
David Sharp60303ed2012-10-11 16:27:52 -07004718 /*
4719 * New clock may not be consistent with the previous clock.
4720 * Reset the buffer so that it doesn't have incomparable timestamps.
4721 */
Alexander Z Lam94571582013-08-02 18:36:16 -07004722 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004723
4724#ifdef CONFIG_TRACER_MAX_TRACE
4725 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
4726 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
Alexander Z Lam94571582013-08-02 18:36:16 -07004727 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004728#endif
David Sharp60303ed2012-10-11 16:27:52 -07004729
Zhaolei5079f322009-08-25 16:12:56 +08004730 mutex_unlock(&trace_types_lock);
4731
4732 *fpos += cnt;
4733
4734 return cnt;
4735}
4736
Li Zefan13f16d22009-12-08 11:16:11 +08004737static int tracing_clock_open(struct inode *inode, struct file *file)
4738{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004739 struct trace_array *tr = inode->i_private;
4740 int ret;
4741
Li Zefan13f16d22009-12-08 11:16:11 +08004742 if (tracing_disabled)
4743 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004744
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004745 if (trace_array_get(tr))
4746 return -ENODEV;
4747
4748 ret = single_open(file, tracing_clock_show, inode->i_private);
4749 if (ret < 0)
4750 trace_array_put(tr);
4751
4752 return ret;
Li Zefan13f16d22009-12-08 11:16:11 +08004753}
4754
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05004755struct ftrace_buffer_info {
4756 struct trace_iterator iter;
4757 void *spare;
4758 unsigned int read;
4759};
4760
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004761#ifdef CONFIG_TRACER_SNAPSHOT
4762static int tracing_snapshot_open(struct inode *inode, struct file *file)
4763{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004764 struct trace_array *tr = inode->i_private;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004765 struct trace_iterator *iter;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004766 struct seq_file *m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004767 int ret = 0;
4768
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004769 if (trace_array_get(tr) < 0)
4770 return -ENODEV;
4771
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004772 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02004773 iter = __tracing_open(inode, file, true);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004774 if (IS_ERR(iter))
4775 ret = PTR_ERR(iter);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004776 } else {
4777 /* Writes still need the seq_file to hold the private data */
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004778 ret = -ENOMEM;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004779 m = kzalloc(sizeof(*m), GFP_KERNEL);
4780 if (!m)
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004781 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004782 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4783 if (!iter) {
4784 kfree(m);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004785 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004786 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004787 ret = 0;
4788
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004789 iter->tr = tr;
Oleg Nesterov6484c712013-07-23 17:26:10 +02004790 iter->trace_buffer = &tr->max_buffer;
4791 iter->cpu_file = tracing_get_cpu(inode);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004792 m->private = iter;
4793 file->private_data = m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004794 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004795out:
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004796 if (ret < 0)
4797 trace_array_put(tr);
4798
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004799 return ret;
4800}
4801
4802static ssize_t
4803tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
4804 loff_t *ppos)
4805{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004806 struct seq_file *m = filp->private_data;
4807 struct trace_iterator *iter = m->private;
4808 struct trace_array *tr = iter->tr;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004809 unsigned long val;
4810 int ret;
4811
4812 ret = tracing_update_buffers();
4813 if (ret < 0)
4814 return ret;
4815
4816 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4817 if (ret)
4818 return ret;
4819
4820 mutex_lock(&trace_types_lock);
4821
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004822 if (tr->current_trace->use_max_tr) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004823 ret = -EBUSY;
4824 goto out;
4825 }
4826
4827 switch (val) {
4828 case 0:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004829 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4830 ret = -EINVAL;
4831 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004832 }
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004833 if (tr->allocated_snapshot)
4834 free_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004835 break;
4836 case 1:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004837/* Only allow per-cpu swap if the ring buffer supports it */
4838#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
4839 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4840 ret = -EINVAL;
4841 break;
4842 }
4843#endif
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05004844 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004845 ret = alloc_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004846 if (ret < 0)
4847 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004848 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004849 local_irq_disable();
4850 /* Now, we're going to swap */
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004851 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05004852 update_max_tr(tr, current, smp_processor_id());
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004853 else
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05004854 update_max_tr_single(tr, current, iter->cpu_file);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004855 local_irq_enable();
4856 break;
4857 default:
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05004858 if (tr->allocated_snapshot) {
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004859 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4860 tracing_reset_online_cpus(&tr->max_buffer);
4861 else
4862 tracing_reset(&tr->max_buffer, iter->cpu_file);
4863 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004864 break;
4865 }
4866
4867 if (ret >= 0) {
4868 *ppos += cnt;
4869 ret = cnt;
4870 }
4871out:
4872 mutex_unlock(&trace_types_lock);
4873 return ret;
4874}
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004875
4876static int tracing_snapshot_release(struct inode *inode, struct file *file)
4877{
4878 struct seq_file *m = file->private_data;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004879 int ret;
4880
4881 ret = tracing_release(inode, file);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004882
4883 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004884 return ret;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004885
4886 /* If write only, the seq_file is just a stub */
4887 if (m)
4888 kfree(m->private);
4889 kfree(m);
4890
4891 return 0;
4892}
4893
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05004894static int tracing_buffers_open(struct inode *inode, struct file *filp);
4895static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
4896 size_t count, loff_t *ppos);
4897static int tracing_buffers_release(struct inode *inode, struct file *file);
4898static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
4899 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
4900
4901static int snapshot_raw_open(struct inode *inode, struct file *filp)
4902{
4903 struct ftrace_buffer_info *info;
4904 int ret;
4905
4906 ret = tracing_buffers_open(inode, filp);
4907 if (ret < 0)
4908 return ret;
4909
4910 info = filp->private_data;
4911
4912 if (info->iter.trace->use_max_tr) {
4913 tracing_buffers_release(inode, filp);
4914 return -EBUSY;
4915 }
4916
4917 info->iter.snapshot = true;
4918 info->iter.trace_buffer = &info->iter.tr->max_buffer;
4919
4920 return ret;
4921}
4922
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004923#endif /* CONFIG_TRACER_SNAPSHOT */
4924
4925
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004926static const struct file_operations tracing_max_lat_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004927 .open = tracing_open_generic,
4928 .read = tracing_max_lat_read,
4929 .write = tracing_max_lat_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004930 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004931};
4932
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004933static const struct file_operations set_tracer_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004934 .open = tracing_open_generic,
4935 .read = tracing_set_trace_read,
4936 .write = tracing_set_trace_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004937 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004938};
4939
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004940static const struct file_operations tracing_pipe_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004941 .open = tracing_open_pipe,
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004942 .poll = tracing_poll_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004943 .read = tracing_read_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004944 .splice_read = tracing_splice_read_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004945 .release = tracing_release_pipe,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004946 .llseek = no_llseek,
Steven Rostedtb3806b42008-05-12 21:20:46 +02004947};
4948
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004949static const struct file_operations tracing_entries_fops = {
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004950 .open = tracing_open_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02004951 .read = tracing_entries_read,
4952 .write = tracing_entries_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004953 .llseek = generic_file_llseek,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004954 .release = tracing_release_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02004955};
4956
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004957static const struct file_operations tracing_total_entries_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004958 .open = tracing_open_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004959 .read = tracing_total_entries_read,
4960 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004961 .release = tracing_release_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004962};
4963
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004964static const struct file_operations tracing_free_buffer_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004965 .open = tracing_open_generic_tr,
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004966 .write = tracing_free_buffer_write,
4967 .release = tracing_free_buffer_release,
4968};
4969
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004970static const struct file_operations tracing_mark_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004971 .open = tracing_open_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004972 .write = tracing_mark_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004973 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004974 .release = tracing_release_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004975};
4976
Zhaolei5079f322009-08-25 16:12:56 +08004977static const struct file_operations trace_clock_fops = {
Li Zefan13f16d22009-12-08 11:16:11 +08004978 .open = tracing_clock_open,
4979 .read = seq_read,
4980 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004981 .release = tracing_single_release_tr,
Zhaolei5079f322009-08-25 16:12:56 +08004982 .write = tracing_clock_write,
4983};
4984
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004985#ifdef CONFIG_TRACER_SNAPSHOT
4986static const struct file_operations snapshot_fops = {
4987 .open = tracing_snapshot_open,
4988 .read = seq_read,
4989 .write = tracing_snapshot_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004990 .llseek = tracing_lseek,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004991 .release = tracing_snapshot_release,
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004992};
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004993
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05004994static const struct file_operations snapshot_raw_fops = {
4995 .open = snapshot_raw_open,
4996 .read = tracing_buffers_read,
4997 .release = tracing_buffers_release,
4998 .splice_read = tracing_buffers_splice_read,
4999 .llseek = no_llseek,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005000};
5001
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005002#endif /* CONFIG_TRACER_SNAPSHOT */
5003
Steven Rostedt2cadf912008-12-01 22:20:19 -05005004static int tracing_buffers_open(struct inode *inode, struct file *filp)
5005{
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005006 struct trace_array *tr = inode->i_private;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005007 struct ftrace_buffer_info *info;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005008 int ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005009
5010 if (tracing_disabled)
5011 return -ENODEV;
5012
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005013 if (trace_array_get(tr) < 0)
5014 return -ENODEV;
5015
Steven Rostedt2cadf912008-12-01 22:20:19 -05005016 info = kzalloc(sizeof(*info), GFP_KERNEL);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005017 if (!info) {
5018 trace_array_put(tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005019 return -ENOMEM;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005020 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05005021
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005022 mutex_lock(&trace_types_lock);
5023
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005024 info->iter.tr = tr;
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005025 info->iter.cpu_file = tracing_get_cpu(inode);
Steven Rostedtb6273442013-02-28 13:44:11 -05005026 info->iter.trace = tr->current_trace;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005027 info->iter.trace_buffer = &tr->trace_buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005028 info->spare = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005029 /* Force reading ring buffer for first read */
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005030 info->read = (unsigned int)-1;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005031
5032 filp->private_data = info;
5033
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005034 mutex_unlock(&trace_types_lock);
5035
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005036 ret = nonseekable_open(inode, filp);
5037 if (ret < 0)
5038 trace_array_put(tr);
5039
5040 return ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005041}
5042
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005043static unsigned int
5044tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5045{
5046 struct ftrace_buffer_info *info = filp->private_data;
5047 struct trace_iterator *iter = &info->iter;
5048
5049 return trace_poll(iter, filp, poll_table);
5050}
5051
Steven Rostedt2cadf912008-12-01 22:20:19 -05005052static ssize_t
5053tracing_buffers_read(struct file *filp, char __user *ubuf,
5054 size_t count, loff_t *ppos)
5055{
5056 struct ftrace_buffer_info *info = filp->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005057 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005058 ssize_t ret;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005059 ssize_t size;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005060
Steven Rostedt2dc5d122009-03-04 19:10:05 -05005061 if (!count)
5062 return 0;
5063
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005064 mutex_lock(&trace_types_lock);
5065
5066#ifdef CONFIG_TRACER_MAX_TRACE
5067 if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5068 size = -EBUSY;
5069 goto out_unlock;
5070 }
5071#endif
5072
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005073 if (!info->spare)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005074 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5075 iter->cpu_file);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005076 size = -ENOMEM;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005077 if (!info->spare)
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005078 goto out_unlock;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005079
Steven Rostedt2cadf912008-12-01 22:20:19 -05005080 /* Do we have previous read data to read? */
5081 if (info->read < PAGE_SIZE)
5082 goto read;
5083
Steven Rostedtb6273442013-02-28 13:44:11 -05005084 again:
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005085 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005086 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005087 &info->spare,
5088 count,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005089 iter->cpu_file, 0);
5090 trace_access_unlock(iter->cpu_file);
Steven Rostedtb6273442013-02-28 13:44:11 -05005091
5092 if (ret < 0) {
5093 if (trace_empty(iter)) {
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005094 if ((filp->f_flags & O_NONBLOCK)) {
5095 size = -EAGAIN;
5096 goto out_unlock;
5097 }
5098 mutex_unlock(&trace_types_lock);
Steven Rostedtb6273442013-02-28 13:44:11 -05005099 iter->trace->wait_pipe(iter);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005100 mutex_lock(&trace_types_lock);
5101 if (signal_pending(current)) {
5102 size = -EINTR;
5103 goto out_unlock;
5104 }
Steven Rostedtb6273442013-02-28 13:44:11 -05005105 goto again;
5106 }
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005107 size = 0;
5108 goto out_unlock;
Steven Rostedtb6273442013-02-28 13:44:11 -05005109 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05005110
Steven Rostedt436fc282011-10-14 10:44:25 -04005111 info->read = 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05005112 read:
Steven Rostedt2cadf912008-12-01 22:20:19 -05005113 size = PAGE_SIZE - info->read;
5114 if (size > count)
5115 size = count;
5116
5117 ret = copy_to_user(ubuf, info->spare + info->read, size);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005118 if (ret == size) {
5119 size = -EFAULT;
5120 goto out_unlock;
5121 }
Steven Rostedt2dc5d122009-03-04 19:10:05 -05005122 size -= ret;
5123
Steven Rostedt2cadf912008-12-01 22:20:19 -05005124 *ppos += size;
5125 info->read += size;
5126
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005127 out_unlock:
5128 mutex_unlock(&trace_types_lock);
5129
Steven Rostedt2cadf912008-12-01 22:20:19 -05005130 return size;
5131}
5132
5133static int tracing_buffers_release(struct inode *inode, struct file *file)
5134{
5135 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005136 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005137
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005138 mutex_lock(&trace_types_lock);
5139
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005140 __trace_array_put(iter->tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005141
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005142 if (info->spare)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005143 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005144 kfree(info);
5145
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005146 mutex_unlock(&trace_types_lock);
5147
Steven Rostedt2cadf912008-12-01 22:20:19 -05005148 return 0;
5149}
5150
5151struct buffer_ref {
5152 struct ring_buffer *buffer;
5153 void *page;
5154 int ref;
5155};
5156
5157static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5158 struct pipe_buffer *buf)
5159{
5160 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5161
5162 if (--ref->ref)
5163 return;
5164
5165 ring_buffer_free_read_page(ref->buffer, ref->page);
5166 kfree(ref);
5167 buf->private = 0;
5168}
5169
Steven Rostedt2cadf912008-12-01 22:20:19 -05005170static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5171 struct pipe_buffer *buf)
5172{
5173 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5174
5175 ref->ref++;
5176}
5177
5178/* Pipe buffer operations for a buffer. */
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08005179static const struct pipe_buf_operations buffer_pipe_buf_ops = {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005180 .can_merge = 0,
5181 .map = generic_pipe_buf_map,
5182 .unmap = generic_pipe_buf_unmap,
5183 .confirm = generic_pipe_buf_confirm,
5184 .release = buffer_pipe_buf_release,
Masami Hiramatsud55cb6c2012-08-09 21:31:10 +09005185 .steal = generic_pipe_buf_steal,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005186 .get = buffer_pipe_buf_get,
5187};
5188
5189/*
5190 * Callback from splice_to_pipe(), if we need to release some pages
5191 * at the end of the spd in case we error'ed out in filling the pipe.
5192 */
5193static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5194{
5195 struct buffer_ref *ref =
5196 (struct buffer_ref *)spd->partial[i].private;
5197
5198 if (--ref->ref)
5199 return;
5200
5201 ring_buffer_free_read_page(ref->buffer, ref->page);
5202 kfree(ref);
5203 spd->partial[i].private = 0;
5204}
5205
5206static ssize_t
5207tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5208 struct pipe_inode_info *pipe, size_t len,
5209 unsigned int flags)
5210{
5211 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005212 struct trace_iterator *iter = &info->iter;
Jens Axboe35f3d142010-05-20 10:43:18 +02005213 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5214 struct page *pages_def[PIPE_DEF_BUFFERS];
Steven Rostedt2cadf912008-12-01 22:20:19 -05005215 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02005216 .pages = pages_def,
5217 .partial = partial_def,
Eric Dumazet047fe362012-06-12 15:24:40 +02005218 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005219 .flags = flags,
5220 .ops = &buffer_pipe_buf_ops,
5221 .spd_release = buffer_spd_release,
5222 };
5223 struct buffer_ref *ref;
Steven Rostedt93459c62009-04-29 00:23:13 -04005224 int entries, size, i;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005225 ssize_t ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005226
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005227 mutex_lock(&trace_types_lock);
5228
5229#ifdef CONFIG_TRACER_MAX_TRACE
5230 if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5231 ret = -EBUSY;
5232 goto out;
5233 }
5234#endif
5235
5236 if (splice_grow_spd(pipe, &spd)) {
5237 ret = -ENOMEM;
5238 goto out;
5239 }
Jens Axboe35f3d142010-05-20 10:43:18 +02005240
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005241 if (*ppos & (PAGE_SIZE - 1)) {
Jens Axboe35f3d142010-05-20 10:43:18 +02005242 ret = -EINVAL;
5243 goto out;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005244 }
5245
5246 if (len & (PAGE_SIZE - 1)) {
Jens Axboe35f3d142010-05-20 10:43:18 +02005247 if (len < PAGE_SIZE) {
5248 ret = -EINVAL;
5249 goto out;
5250 }
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005251 len &= PAGE_MASK;
5252 }
5253
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005254 again:
5255 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005256 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt93459c62009-04-29 00:23:13 -04005257
Jens Axboe35f3d142010-05-20 10:43:18 +02005258 for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005259 struct page *page;
5260 int r;
5261
5262 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
5263 if (!ref)
5264 break;
5265
Steven Rostedt7267fa62009-04-29 00:16:21 -04005266 ref->ref = 1;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005267 ref->buffer = iter->trace_buffer->buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005268 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005269 if (!ref->page) {
5270 kfree(ref);
5271 break;
5272 }
5273
5274 r = ring_buffer_read_page(ref->buffer, &ref->page,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005275 len, iter->cpu_file, 1);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005276 if (r < 0) {
Vaibhav Nagarnaik7ea59062011-05-03 17:56:42 -07005277 ring_buffer_free_read_page(ref->buffer, ref->page);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005278 kfree(ref);
5279 break;
5280 }
5281
5282 /*
5283 * zero out any left over data, this is going to
5284 * user land.
5285 */
5286 size = ring_buffer_page_len(ref->page);
5287 if (size < PAGE_SIZE)
5288 memset(ref->page + size, 0, PAGE_SIZE - size);
5289
5290 page = virt_to_page(ref->page);
5291
5292 spd.pages[i] = page;
5293 spd.partial[i].len = PAGE_SIZE;
5294 spd.partial[i].offset = 0;
5295 spd.partial[i].private = (unsigned long)ref;
5296 spd.nr_pages++;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005297 *ppos += PAGE_SIZE;
Steven Rostedt93459c62009-04-29 00:23:13 -04005298
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005299 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005300 }
5301
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005302 trace_access_unlock(iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005303 spd.nr_pages = i;
5304
5305 /* did we read anything? */
5306 if (!spd.nr_pages) {
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005307 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005308 ret = -EAGAIN;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005309 goto out;
5310 }
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005311 mutex_unlock(&trace_types_lock);
Steven Rostedtb6273442013-02-28 13:44:11 -05005312 iter->trace->wait_pipe(iter);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005313 mutex_lock(&trace_types_lock);
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005314 if (signal_pending(current)) {
5315 ret = -EINTR;
5316 goto out;
5317 }
5318 goto again;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005319 }
5320
5321 ret = splice_to_pipe(pipe, &spd);
Eric Dumazet047fe362012-06-12 15:24:40 +02005322 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02005323out:
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005324 mutex_unlock(&trace_types_lock);
5325
Steven Rostedt2cadf912008-12-01 22:20:19 -05005326 return ret;
5327}
5328
5329static const struct file_operations tracing_buffers_fops = {
5330 .open = tracing_buffers_open,
5331 .read = tracing_buffers_read,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005332 .poll = tracing_buffers_poll,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005333 .release = tracing_buffers_release,
5334 .splice_read = tracing_buffers_splice_read,
5335 .llseek = no_llseek,
5336};
5337
Steven Rostedtc8d77182009-04-29 18:03:45 -04005338static ssize_t
5339tracing_stats_read(struct file *filp, char __user *ubuf,
5340 size_t count, loff_t *ppos)
5341{
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005342 struct inode *inode = file_inode(filp);
5343 struct trace_array *tr = inode->i_private;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005344 struct trace_buffer *trace_buf = &tr->trace_buffer;
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005345 int cpu = tracing_get_cpu(inode);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005346 struct trace_seq *s;
5347 unsigned long cnt;
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005348 unsigned long long t;
5349 unsigned long usec_rem;
Steven Rostedtc8d77182009-04-29 18:03:45 -04005350
Li Zefane4f2d102009-06-15 10:57:28 +08005351 s = kmalloc(sizeof(*s), GFP_KERNEL);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005352 if (!s)
Roel Kluina6463652009-11-11 22:26:35 +01005353 return -ENOMEM;
Steven Rostedtc8d77182009-04-29 18:03:45 -04005354
5355 trace_seq_init(s);
5356
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005357 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005358 trace_seq_printf(s, "entries: %ld\n", cnt);
5359
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005360 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005361 trace_seq_printf(s, "overrun: %ld\n", cnt);
5362
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005363 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005364 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5365
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005366 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005367 trace_seq_printf(s, "bytes: %ld\n", cnt);
5368
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09005369 if (trace_clocks[tr->clock_id].in_ns) {
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005370 /* local or global for trace_clock */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005371 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005372 usec_rem = do_div(t, USEC_PER_SEC);
5373 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5374 t, usec_rem);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005375
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005376 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005377 usec_rem = do_div(t, USEC_PER_SEC);
5378 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5379 } else {
5380 /* counter or tsc mode for trace_clock */
5381 trace_seq_printf(s, "oldest event ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005382 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005383
5384 trace_seq_printf(s, "now ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005385 ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005386 }
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005387
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005388 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
Slava Pestov884bfe82011-07-15 14:23:58 -07005389 trace_seq_printf(s, "dropped events: %ld\n", cnt);
5390
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005391 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
Steven Rostedt (Red Hat)ad964702013-01-29 17:45:49 -05005392 trace_seq_printf(s, "read events: %ld\n", cnt);
5393
Steven Rostedtc8d77182009-04-29 18:03:45 -04005394 count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
5395
5396 kfree(s);
5397
5398 return count;
5399}
5400
5401static const struct file_operations tracing_stats_fops = {
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005402 .open = tracing_open_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04005403 .read = tracing_stats_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005404 .llseek = generic_file_llseek,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005405 .release = tracing_release_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04005406};
5407
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005408#ifdef CONFIG_DYNAMIC_FTRACE
5409
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005410int __weak ftrace_arch_read_dyn_info(char *buf, int size)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005411{
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005412 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005413}
5414
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005415static ssize_t
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005416tracing_read_dyn_info(struct file *filp, char __user *ubuf,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005417 size_t cnt, loff_t *ppos)
5418{
Steven Rostedta26a2a22008-10-31 00:03:22 -04005419 static char ftrace_dyn_info_buffer[1024];
5420 static DEFINE_MUTEX(dyn_info_mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005421 unsigned long *p = filp->private_data;
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005422 char *buf = ftrace_dyn_info_buffer;
Steven Rostedta26a2a22008-10-31 00:03:22 -04005423 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005424 int r;
5425
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005426 mutex_lock(&dyn_info_mutex);
5427 r = sprintf(buf, "%ld ", *p);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005428
Steven Rostedta26a2a22008-10-31 00:03:22 -04005429 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005430 buf[r++] = '\n';
5431
5432 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5433
5434 mutex_unlock(&dyn_info_mutex);
5435
5436 return r;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005437}
5438
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005439static const struct file_operations tracing_dyn_info_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005440 .open = tracing_open_generic,
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005441 .read = tracing_read_dyn_info,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005442 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005443};
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005444#endif /* CONFIG_DYNAMIC_FTRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005445
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005446#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5447static void
5448ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005449{
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005450 tracing_snapshot();
5451}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005452
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005453static void
5454ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5455{
5456 unsigned long *count = (long *)data;
5457
5458 if (!*count)
5459 return;
5460
5461 if (*count != -1)
5462 (*count)--;
5463
5464 tracing_snapshot();
5465}
5466
5467static int
5468ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5469 struct ftrace_probe_ops *ops, void *data)
5470{
5471 long count = (long)data;
5472
5473 seq_printf(m, "%ps:", (void *)ip);
5474
5475 seq_printf(m, "snapshot");
5476
5477 if (count == -1)
5478 seq_printf(m, ":unlimited\n");
5479 else
5480 seq_printf(m, ":count=%ld\n", count);
5481
5482 return 0;
5483}
5484
5485static struct ftrace_probe_ops snapshot_probe_ops = {
5486 .func = ftrace_snapshot,
5487 .print = ftrace_snapshot_print,
5488};
5489
5490static struct ftrace_probe_ops snapshot_count_probe_ops = {
5491 .func = ftrace_count_snapshot,
5492 .print = ftrace_snapshot_print,
5493};
5494
5495static int
5496ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
5497 char *glob, char *cmd, char *param, int enable)
5498{
5499 struct ftrace_probe_ops *ops;
5500 void *count = (void *)-1;
5501 char *number;
5502 int ret;
5503
5504 /* hash funcs only work with set_ftrace_filter */
5505 if (!enable)
5506 return -EINVAL;
5507
5508 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
5509
5510 if (glob[0] == '!') {
5511 unregister_ftrace_function_probe_func(glob+1, ops);
5512 return 0;
5513 }
5514
5515 if (!param)
5516 goto out_reg;
5517
5518 number = strsep(&param, ":");
5519
5520 if (!strlen(number))
5521 goto out_reg;
5522
5523 /*
5524 * We use the callback data field (which is a pointer)
5525 * as our counter.
5526 */
5527 ret = kstrtoul(number, 0, (unsigned long *)&count);
5528 if (ret)
5529 return ret;
5530
5531 out_reg:
5532 ret = register_ftrace_function_probe(glob, ops, count);
5533
5534 if (ret >= 0)
5535 alloc_snapshot(&global_trace);
5536
5537 return ret < 0 ? ret : 0;
5538}
5539
5540static struct ftrace_func_command ftrace_snapshot_cmd = {
5541 .name = "snapshot",
5542 .func = ftrace_trace_snapshot_callback,
5543};
5544
Tom Zanussi38de93a2013-10-24 08:34:18 -05005545static __init int register_snapshot_cmd(void)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005546{
5547 return register_ftrace_command(&ftrace_snapshot_cmd);
5548}
5549#else
Tom Zanussi38de93a2013-10-24 08:34:18 -05005550static inline __init int register_snapshot_cmd(void) { return 0; }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005551#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005552
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005553struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005554{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005555 if (tr->dir)
5556 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005557
Frederic Weisbecker3e1f60b2009-03-22 23:10:45 +01005558 if (!debugfs_initialized())
5559 return NULL;
5560
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005561 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
5562 tr->dir = debugfs_create_dir("tracing", NULL);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005563
zhangwei(Jovi)687c8782013-03-11 15:13:29 +08005564 if (!tr->dir)
5565 pr_warn_once("Could not create debugfs directory 'tracing'\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005566
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005567 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005568}
5569
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005570struct dentry *tracing_init_dentry(void)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005571{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005572 return tracing_init_dentry_tr(&global_trace);
5573}
5574
5575static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
5576{
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005577 struct dentry *d_tracer;
5578
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005579 if (tr->percpu_dir)
5580 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005581
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005582 d_tracer = tracing_init_dentry_tr(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005583 if (!d_tracer)
5584 return NULL;
5585
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005586 tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005587
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005588 WARN_ONCE(!tr->percpu_dir,
5589 "Could not create debugfs directory 'per_cpu/%d'\n", cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005590
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005591 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005592}
5593
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005594static struct dentry *
5595trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
5596 void *data, long cpu, const struct file_operations *fops)
5597{
5598 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
5599
5600 if (ret) /* See tracing_get_cpu() */
5601 ret->d_inode->i_cdev = (void *)(cpu + 1);
5602 return ret;
5603}
5604
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005605static void
5606tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005607{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005608 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005609 struct dentry *d_cpu;
Steven Rostedtdd49a382010-10-20 21:51:26 -04005610 char cpu_dir[30]; /* 30 characters should be more than enough */
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005611
Namhyung Kim0a3d7ce2012-04-23 10:11:57 +09005612 if (!d_percpu)
5613 return;
5614
Steven Rostedtdd49a382010-10-20 21:51:26 -04005615 snprintf(cpu_dir, 30, "cpu%ld", cpu);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01005616 d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
5617 if (!d_cpu) {
5618 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
5619 return;
5620 }
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005621
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01005622 /* per cpu trace_pipe */
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005623 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
Oleg Nesterov15544202013-07-23 17:25:57 +02005624 tr, cpu, &tracing_pipe_fops);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005625
5626 /* per cpu trace */
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005627 trace_create_cpu_file("trace", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02005628 tr, cpu, &tracing_fops);
Steven Rostedt7f96f932009-03-13 00:37:42 -04005629
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005630 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005631 tr, cpu, &tracing_buffers_fops);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005632
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005633 trace_create_cpu_file("stats", 0444, d_cpu,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005634 tr, cpu, &tracing_stats_fops);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005635
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005636 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005637 tr, cpu, &tracing_entries_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005638
5639#ifdef CONFIG_TRACER_SNAPSHOT
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005640 trace_create_cpu_file("snapshot", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02005641 tr, cpu, &snapshot_fops);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005642
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005643 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005644 tr, cpu, &snapshot_raw_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005645#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005646}
5647
Steven Rostedt60a11772008-05-12 21:20:44 +02005648#ifdef CONFIG_FTRACE_SELFTEST
5649/* Let selftest have access to static functions in this file */
5650#include "trace_selftest.c"
5651#endif
5652
Steven Rostedt577b7852009-02-26 23:43:05 -05005653struct trace_option_dentry {
5654 struct tracer_opt *opt;
5655 struct tracer_flags *flags;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005656 struct trace_array *tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05005657 struct dentry *entry;
5658};
5659
5660static ssize_t
5661trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
5662 loff_t *ppos)
5663{
5664 struct trace_option_dentry *topt = filp->private_data;
5665 char *buf;
5666
5667 if (topt->flags->val & topt->opt->bit)
5668 buf = "1\n";
5669 else
5670 buf = "0\n";
5671
5672 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5673}
5674
5675static ssize_t
5676trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
5677 loff_t *ppos)
5678{
5679 struct trace_option_dentry *topt = filp->private_data;
5680 unsigned long val;
Steven Rostedt577b7852009-02-26 23:43:05 -05005681 int ret;
5682
Peter Huewe22fe9b52011-06-07 21:58:27 +02005683 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5684 if (ret)
Steven Rostedt577b7852009-02-26 23:43:05 -05005685 return ret;
5686
Li Zefan8d18eaa2009-12-08 11:17:06 +08005687 if (val != 0 && val != 1)
Steven Rostedt577b7852009-02-26 23:43:05 -05005688 return -EINVAL;
Li Zefan8d18eaa2009-12-08 11:17:06 +08005689
5690 if (!!(topt->flags->val & topt->opt->bit) != val) {
5691 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005692 ret = __set_tracer_option(topt->tr->current_trace, topt->flags,
Steven Rostedtc757bea2009-12-21 22:35:16 -05005693 topt->opt, !val);
Li Zefan8d18eaa2009-12-08 11:17:06 +08005694 mutex_unlock(&trace_types_lock);
5695 if (ret)
5696 return ret;
Steven Rostedt577b7852009-02-26 23:43:05 -05005697 }
5698
5699 *ppos += cnt;
5700
5701 return cnt;
5702}
5703
5704
5705static const struct file_operations trace_options_fops = {
5706 .open = tracing_open_generic,
5707 .read = trace_options_read,
5708 .write = trace_options_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005709 .llseek = generic_file_llseek,
Steven Rostedt577b7852009-02-26 23:43:05 -05005710};
5711
Steven Rostedta8259072009-02-26 22:19:12 -05005712static ssize_t
5713trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
5714 loff_t *ppos)
5715{
5716 long index = (long)filp->private_data;
5717 char *buf;
5718
5719 if (trace_flags & (1 << index))
5720 buf = "1\n";
5721 else
5722 buf = "0\n";
5723
5724 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5725}
5726
5727static ssize_t
5728trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
5729 loff_t *ppos)
5730{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005731 struct trace_array *tr = &global_trace;
Steven Rostedta8259072009-02-26 22:19:12 -05005732 long index = (long)filp->private_data;
Steven Rostedta8259072009-02-26 22:19:12 -05005733 unsigned long val;
5734 int ret;
5735
Peter Huewe22fe9b52011-06-07 21:58:27 +02005736 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5737 if (ret)
Steven Rostedta8259072009-02-26 22:19:12 -05005738 return ret;
5739
Zhaoleif2d84b62009-08-07 18:55:48 +08005740 if (val != 0 && val != 1)
Steven Rostedta8259072009-02-26 22:19:12 -05005741 return -EINVAL;
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04005742
5743 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005744 ret = set_tracer_flag(tr, 1 << index, val);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04005745 mutex_unlock(&trace_types_lock);
Steven Rostedta8259072009-02-26 22:19:12 -05005746
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005747 if (ret < 0)
5748 return ret;
5749
Steven Rostedta8259072009-02-26 22:19:12 -05005750 *ppos += cnt;
5751
5752 return cnt;
5753}
5754
Steven Rostedta8259072009-02-26 22:19:12 -05005755static const struct file_operations trace_options_core_fops = {
5756 .open = tracing_open_generic,
5757 .read = trace_options_core_read,
5758 .write = trace_options_core_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005759 .llseek = generic_file_llseek,
Steven Rostedta8259072009-02-26 22:19:12 -05005760};
5761
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005762struct dentry *trace_create_file(const char *name,
Al Virof4ae40a2011-07-24 04:33:43 -04005763 umode_t mode,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005764 struct dentry *parent,
5765 void *data,
5766 const struct file_operations *fops)
5767{
5768 struct dentry *ret;
5769
5770 ret = debugfs_create_file(name, mode, parent, data, fops);
5771 if (!ret)
5772 pr_warning("Could not create debugfs '%s' entry\n", name);
5773
5774 return ret;
5775}
5776
5777
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005778static struct dentry *trace_options_init_dentry(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05005779{
5780 struct dentry *d_tracer;
Steven Rostedta8259072009-02-26 22:19:12 -05005781
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005782 if (tr->options)
5783 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05005784
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005785 d_tracer = tracing_init_dentry_tr(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05005786 if (!d_tracer)
5787 return NULL;
5788
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005789 tr->options = debugfs_create_dir("options", d_tracer);
5790 if (!tr->options) {
Steven Rostedta8259072009-02-26 22:19:12 -05005791 pr_warning("Could not create debugfs directory 'options'\n");
5792 return NULL;
5793 }
5794
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005795 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05005796}
5797
Steven Rostedt577b7852009-02-26 23:43:05 -05005798static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005799create_trace_option_file(struct trace_array *tr,
5800 struct trace_option_dentry *topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05005801 struct tracer_flags *flags,
5802 struct tracer_opt *opt)
5803{
5804 struct dentry *t_options;
Steven Rostedt577b7852009-02-26 23:43:05 -05005805
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005806 t_options = trace_options_init_dentry(tr);
Steven Rostedt577b7852009-02-26 23:43:05 -05005807 if (!t_options)
5808 return;
5809
5810 topt->flags = flags;
5811 topt->opt = opt;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005812 topt->tr = tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05005813
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005814 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05005815 &trace_options_fops);
5816
Steven Rostedt577b7852009-02-26 23:43:05 -05005817}
5818
5819static struct trace_option_dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005820create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
Steven Rostedt577b7852009-02-26 23:43:05 -05005821{
5822 struct trace_option_dentry *topts;
5823 struct tracer_flags *flags;
5824 struct tracer_opt *opts;
5825 int cnt;
5826
5827 if (!tracer)
5828 return NULL;
5829
5830 flags = tracer->flags;
5831
5832 if (!flags || !flags->opts)
5833 return NULL;
5834
5835 opts = flags->opts;
5836
5837 for (cnt = 0; opts[cnt].name; cnt++)
5838 ;
5839
Steven Rostedt0cfe8242009-02-27 10:51:10 -05005840 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
Steven Rostedt577b7852009-02-26 23:43:05 -05005841 if (!topts)
5842 return NULL;
5843
5844 for (cnt = 0; opts[cnt].name; cnt++)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005845 create_trace_option_file(tr, &topts[cnt], flags,
Steven Rostedt577b7852009-02-26 23:43:05 -05005846 &opts[cnt]);
5847
5848 return topts;
5849}
5850
5851static void
5852destroy_trace_option_files(struct trace_option_dentry *topts)
5853{
5854 int cnt;
5855
5856 if (!topts)
5857 return;
5858
5859 for (cnt = 0; topts[cnt].opt; cnt++) {
5860 if (topts[cnt].entry)
5861 debugfs_remove(topts[cnt].entry);
5862 }
5863
5864 kfree(topts);
5865}
5866
Steven Rostedta8259072009-02-26 22:19:12 -05005867static struct dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005868create_trace_option_core_file(struct trace_array *tr,
5869 const char *option, long index)
Steven Rostedta8259072009-02-26 22:19:12 -05005870{
5871 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05005872
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005873 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05005874 if (!t_options)
5875 return NULL;
5876
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005877 return trace_create_file(option, 0644, t_options, (void *)index,
Steven Rostedta8259072009-02-26 22:19:12 -05005878 &trace_options_core_fops);
Steven Rostedta8259072009-02-26 22:19:12 -05005879}
5880
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005881static __init void create_trace_options_dir(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05005882{
5883 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05005884 int i;
5885
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005886 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05005887 if (!t_options)
5888 return;
5889
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005890 for (i = 0; trace_options[i]; i++)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005891 create_trace_option_core_file(tr, trace_options[i], i);
Steven Rostedta8259072009-02-26 22:19:12 -05005892}
5893
Steven Rostedt499e5472012-02-22 15:50:28 -05005894static ssize_t
5895rb_simple_read(struct file *filp, char __user *ubuf,
5896 size_t cnt, loff_t *ppos)
5897{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04005898 struct trace_array *tr = filp->private_data;
Steven Rostedt499e5472012-02-22 15:50:28 -05005899 char buf[64];
5900 int r;
5901
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04005902 r = tracer_tracing_is_on(tr);
Steven Rostedt499e5472012-02-22 15:50:28 -05005903 r = sprintf(buf, "%d\n", r);
5904
5905 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5906}
5907
5908static ssize_t
5909rb_simple_write(struct file *filp, const char __user *ubuf,
5910 size_t cnt, loff_t *ppos)
5911{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04005912 struct trace_array *tr = filp->private_data;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005913 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt499e5472012-02-22 15:50:28 -05005914 unsigned long val;
5915 int ret;
5916
5917 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5918 if (ret)
5919 return ret;
5920
5921 if (buffer) {
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05005922 mutex_lock(&trace_types_lock);
5923 if (val) {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04005924 tracer_tracing_on(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005925 if (tr->current_trace->start)
5926 tr->current_trace->start(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05005927 } else {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04005928 tracer_tracing_off(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005929 if (tr->current_trace->stop)
5930 tr->current_trace->stop(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05005931 }
5932 mutex_unlock(&trace_types_lock);
Steven Rostedt499e5472012-02-22 15:50:28 -05005933 }
5934
5935 (*ppos)++;
5936
5937 return cnt;
5938}
5939
5940static const struct file_operations rb_simple_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005941 .open = tracing_open_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05005942 .read = rb_simple_read,
5943 .write = rb_simple_write,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005944 .release = tracing_release_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05005945 .llseek = default_llseek,
5946};
5947
Steven Rostedt277ba042012-08-03 16:10:49 -04005948struct dentry *trace_instance_dir;
5949
5950static void
5951init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
5952
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05005953static int
5954allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
Steven Rostedt277ba042012-08-03 16:10:49 -04005955{
5956 enum ring_buffer_flags rb_flags;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05005957
5958 rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
5959
Steven Rostedt (Red Hat)dced3412014-01-14 10:19:46 -05005960 buf->tr = tr;
5961
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05005962 buf->buffer = ring_buffer_alloc(size, rb_flags);
5963 if (!buf->buffer)
5964 return -ENOMEM;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05005965
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05005966 buf->data = alloc_percpu(struct trace_array_cpu);
5967 if (!buf->data) {
5968 ring_buffer_free(buf->buffer);
5969 return -ENOMEM;
5970 }
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05005971
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05005972 /* Allocate the first page for all buffers */
5973 set_buffer_entries(&tr->trace_buffer,
5974 ring_buffer_size(tr->trace_buffer.buffer, 0));
5975
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05005976 return 0;
5977}
5978
5979static int allocate_trace_buffers(struct trace_array *tr, int size)
5980{
5981 int ret;
5982
5983 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
5984 if (ret)
5985 return ret;
5986
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05005987#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05005988 ret = allocate_trace_buffer(tr, &tr->max_buffer,
5989 allocate_snapshot ? size : 1);
5990 if (WARN_ON(ret)) {
5991 ring_buffer_free(tr->trace_buffer.buffer);
5992 free_percpu(tr->trace_buffer.data);
5993 return -ENOMEM;
5994 }
5995 tr->allocated_snapshot = allocate_snapshot;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05005996
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05005997 /*
5998 * Only the top level trace array gets its snapshot allocated
5999 * from the kernel command line.
6000 */
6001 allocate_snapshot = false;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006002#endif
6003 return 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006004}
6005
6006static int new_instance_create(const char *name)
6007{
Steven Rostedt277ba042012-08-03 16:10:49 -04006008 struct trace_array *tr;
6009 int ret;
Steven Rostedt277ba042012-08-03 16:10:49 -04006010
6011 mutex_lock(&trace_types_lock);
6012
6013 ret = -EEXIST;
6014 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6015 if (tr->name && strcmp(tr->name, name) == 0)
6016 goto out_unlock;
6017 }
6018
6019 ret = -ENOMEM;
6020 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
6021 if (!tr)
6022 goto out_unlock;
6023
6024 tr->name = kstrdup(name, GFP_KERNEL);
6025 if (!tr->name)
6026 goto out_free_tr;
6027
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07006028 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6029 goto out_free_tr;
6030
6031 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6032
Steven Rostedt277ba042012-08-03 16:10:49 -04006033 raw_spin_lock_init(&tr->start_lock);
6034
6035 tr->current_trace = &nop_trace;
6036
6037 INIT_LIST_HEAD(&tr->systems);
6038 INIT_LIST_HEAD(&tr->events);
6039
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006040 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
Steven Rostedt277ba042012-08-03 16:10:49 -04006041 goto out_free_tr;
6042
Steven Rostedt277ba042012-08-03 16:10:49 -04006043 tr->dir = debugfs_create_dir(name, trace_instance_dir);
6044 if (!tr->dir)
6045 goto out_free_tr;
6046
6047 ret = event_trace_add_tracer(tr->dir, tr);
Alexander Z Lam609e85a2013-07-10 17:34:34 -07006048 if (ret) {
6049 debugfs_remove_recursive(tr->dir);
Steven Rostedt277ba042012-08-03 16:10:49 -04006050 goto out_free_tr;
Alexander Z Lam609e85a2013-07-10 17:34:34 -07006051 }
Steven Rostedt277ba042012-08-03 16:10:49 -04006052
6053 init_tracer_debugfs(tr, tr->dir);
6054
6055 list_add(&tr->list, &ftrace_trace_arrays);
6056
6057 mutex_unlock(&trace_types_lock);
6058
6059 return 0;
6060
6061 out_free_tr:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006062 if (tr->trace_buffer.buffer)
6063 ring_buffer_free(tr->trace_buffer.buffer);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07006064 free_cpumask_var(tr->tracing_cpumask);
Steven Rostedt277ba042012-08-03 16:10:49 -04006065 kfree(tr->name);
6066 kfree(tr);
6067
6068 out_unlock:
6069 mutex_unlock(&trace_types_lock);
6070
6071 return ret;
6072
6073}
6074
Steven Rostedt0c8916c2012-08-07 16:14:16 -04006075static int instance_delete(const char *name)
6076{
6077 struct trace_array *tr;
6078 int found = 0;
6079 int ret;
6080
6081 mutex_lock(&trace_types_lock);
6082
6083 ret = -ENODEV;
6084 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6085 if (tr->name && strcmp(tr->name, name) == 0) {
6086 found = 1;
6087 break;
6088 }
6089 }
6090 if (!found)
6091 goto out_unlock;
6092
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05006093 ret = -EBUSY;
6094 if (tr->ref)
6095 goto out_unlock;
6096
Steven Rostedt0c8916c2012-08-07 16:14:16 -04006097 list_del(&tr->list);
6098
6099 event_trace_del_tracer(tr);
6100 debugfs_remove_recursive(tr->dir);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006101 free_percpu(tr->trace_buffer.data);
6102 ring_buffer_free(tr->trace_buffer.buffer);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04006103
6104 kfree(tr->name);
6105 kfree(tr);
6106
6107 ret = 0;
6108
6109 out_unlock:
6110 mutex_unlock(&trace_types_lock);
6111
6112 return ret;
6113}
6114
Steven Rostedt277ba042012-08-03 16:10:49 -04006115static int instance_mkdir (struct inode *inode, struct dentry *dentry, umode_t mode)
6116{
6117 struct dentry *parent;
6118 int ret;
6119
6120 /* Paranoid: Make sure the parent is the "instances" directory */
6121 parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
6122 if (WARN_ON_ONCE(parent != trace_instance_dir))
6123 return -ENOENT;
6124
6125 /*
6126 * The inode mutex is locked, but debugfs_create_dir() will also
6127 * take the mutex. As the instances directory can not be destroyed
6128 * or changed in any other way, it is safe to unlock it, and
6129 * let the dentry try. If two users try to make the same dir at
6130 * the same time, then the new_instance_create() will determine the
6131 * winner.
6132 */
6133 mutex_unlock(&inode->i_mutex);
6134
6135 ret = new_instance_create(dentry->d_iname);
6136
6137 mutex_lock(&inode->i_mutex);
6138
6139 return ret;
6140}
6141
Steven Rostedt0c8916c2012-08-07 16:14:16 -04006142static int instance_rmdir(struct inode *inode, struct dentry *dentry)
6143{
6144 struct dentry *parent;
6145 int ret;
6146
6147 /* Paranoid: Make sure the parent is the "instances" directory */
6148 parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
6149 if (WARN_ON_ONCE(parent != trace_instance_dir))
6150 return -ENOENT;
6151
6152 /* The caller did a dget() on dentry */
6153 mutex_unlock(&dentry->d_inode->i_mutex);
6154
6155 /*
6156 * The inode mutex is locked, but debugfs_create_dir() will also
6157 * take the mutex. As the instances directory can not be destroyed
6158 * or changed in any other way, it is safe to unlock it, and
6159 * let the dentry try. If two users try to make the same dir at
6160 * the same time, then the instance_delete() will determine the
6161 * winner.
6162 */
6163 mutex_unlock(&inode->i_mutex);
6164
6165 ret = instance_delete(dentry->d_iname);
6166
6167 mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
6168 mutex_lock(&dentry->d_inode->i_mutex);
6169
6170 return ret;
6171}
6172
Steven Rostedt277ba042012-08-03 16:10:49 -04006173static const struct inode_operations instance_dir_inode_operations = {
6174 .lookup = simple_lookup,
6175 .mkdir = instance_mkdir,
Steven Rostedt0c8916c2012-08-07 16:14:16 -04006176 .rmdir = instance_rmdir,
Steven Rostedt277ba042012-08-03 16:10:49 -04006177};
6178
6179static __init void create_trace_instances(struct dentry *d_tracer)
6180{
6181 trace_instance_dir = debugfs_create_dir("instances", d_tracer);
6182 if (WARN_ON(!trace_instance_dir))
6183 return;
6184
6185 /* Hijack the dir inode operations, to allow mkdir */
6186 trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
6187}
6188
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006189static void
6190init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
6191{
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05006192 int cpu;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006193
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07006194 trace_create_file("tracing_cpumask", 0644, d_tracer,
6195 tr, &tracing_cpumask_fops);
6196
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006197 trace_create_file("trace_options", 0644, d_tracer,
6198 tr, &tracing_iter_fops);
6199
6200 trace_create_file("trace", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02006201 tr, &tracing_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006202
6203 trace_create_file("trace_pipe", 0444, d_tracer,
Oleg Nesterov15544202013-07-23 17:25:57 +02006204 tr, &tracing_pipe_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006205
6206 trace_create_file("buffer_size_kb", 0644, d_tracer,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006207 tr, &tracing_entries_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006208
6209 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6210 tr, &tracing_total_entries_fops);
6211
Wang YanQing238ae932013-05-26 16:52:01 +08006212 trace_create_file("free_buffer", 0200, d_tracer,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006213 tr, &tracing_free_buffer_fops);
6214
6215 trace_create_file("trace_marker", 0220, d_tracer,
6216 tr, &tracing_mark_fops);
6217
6218 trace_create_file("trace_clock", 0644, d_tracer, tr,
6219 &trace_clock_fops);
6220
6221 trace_create_file("tracing_on", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02006222 tr, &rb_simple_fops);
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05006223
6224#ifdef CONFIG_TRACER_SNAPSHOT
6225 trace_create_file("snapshot", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02006226 tr, &snapshot_fops);
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05006227#endif
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05006228
6229 for_each_tracing_cpu(cpu)
6230 tracing_init_debugfs_percpu(tr, cpu);
6231
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006232}
6233
Frédéric Weisbeckerb5ad3842008-09-23 11:34:32 +01006234static __init int tracer_init_debugfs(void)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006235{
6236 struct dentry *d_tracer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006237
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006238 trace_access_lock_init();
6239
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006240 d_tracer = tracing_init_dentry();
Namhyung Kimed6f1c92013-04-10 09:18:12 +09006241 if (!d_tracer)
6242 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006243
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006244 init_tracer_debugfs(&global_trace, d_tracer);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006245
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006246 trace_create_file("available_tracers", 0444, d_tracer,
6247 &global_trace, &show_traces_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006248
Li Zefan339ae5d2009-04-17 10:34:30 +08006249 trace_create_file("current_tracer", 0644, d_tracer,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006250 &global_trace, &set_tracer_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006251
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04006252#ifdef CONFIG_TRACER_MAX_TRACE
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006253 trace_create_file("tracing_max_latency", 0644, d_tracer,
6254 &tracing_max_latency, &tracing_max_lat_fops);
Tim Bird0e950172010-02-25 15:36:43 -08006255#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006256
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006257 trace_create_file("tracing_thresh", 0644, d_tracer,
6258 &tracing_thresh, &tracing_max_lat_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006259
Li Zefan339ae5d2009-04-17 10:34:30 +08006260 trace_create_file("README", 0444, d_tracer,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006261 NULL, &tracing_readme_fops);
Ingo Molnar7bd2f242008-05-12 21:20:45 +02006262
Avadh Patel69abe6a2009-04-10 16:04:48 -04006263 trace_create_file("saved_cmdlines", 0444, d_tracer,
6264 NULL, &tracing_saved_cmdlines_fops);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006265
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006266#ifdef CONFIG_DYNAMIC_FTRACE
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006267 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
6268 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006269#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006270
Steven Rostedt277ba042012-08-03 16:10:49 -04006271 create_trace_instances(d_tracer);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006272
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006273 create_trace_options_dir(&global_trace);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006274
Frédéric Weisbeckerb5ad3842008-09-23 11:34:32 +01006275 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006276}
6277
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006278static int trace_panic_handler(struct notifier_block *this,
6279 unsigned long event, void *unused)
6280{
Steven Rostedt944ac422008-10-23 19:26:08 -04006281 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02006282 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006283 return NOTIFY_OK;
6284}
6285
6286static struct notifier_block trace_panic_notifier = {
6287 .notifier_call = trace_panic_handler,
6288 .next = NULL,
6289 .priority = 150 /* priority: INT_MAX >= x >= 0 */
6290};
6291
6292static int trace_die_handler(struct notifier_block *self,
6293 unsigned long val,
6294 void *data)
6295{
6296 switch (val) {
6297 case DIE_OOPS:
Steven Rostedt944ac422008-10-23 19:26:08 -04006298 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02006299 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006300 break;
6301 default:
6302 break;
6303 }
6304 return NOTIFY_OK;
6305}
6306
6307static struct notifier_block trace_die_notifier = {
6308 .notifier_call = trace_die_handler,
6309 .priority = 200
6310};
6311
6312/*
6313 * printk is set to max of 1024, we really don't need it that big.
6314 * Nothing should be printing 1000 characters anyway.
6315 */
6316#define TRACE_MAX_PRINT 1000
6317
6318/*
6319 * Define here KERN_TRACE so that we have one place to modify
6320 * it if we decide to change what log level the ftrace dump
6321 * should be at.
6322 */
Steven Rostedt428aee12009-01-14 12:24:42 -05006323#define KERN_TRACE KERN_EMERG
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006324
Jason Wessel955b61e2010-08-05 09:22:23 -05006325void
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006326trace_printk_seq(struct trace_seq *s)
6327{
6328 /* Probably should print a warning here. */
zhangwei(Jovi)bd6df182013-03-11 15:13:37 +08006329 if (s->len >= TRACE_MAX_PRINT)
6330 s->len = TRACE_MAX_PRINT;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006331
6332 /* should be zero ended, but we are paranoid. */
6333 s->buffer[s->len] = 0;
6334
6335 printk(KERN_TRACE "%s", s->buffer);
6336
Steven Rostedtf9520752009-03-02 14:04:40 -05006337 trace_seq_init(s);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006338}
6339
Jason Wessel955b61e2010-08-05 09:22:23 -05006340void trace_init_global_iter(struct trace_iterator *iter)
6341{
6342 iter->tr = &global_trace;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006343 iter->trace = iter->tr->current_trace;
Steven Rostedtae3b5092013-01-23 15:22:59 -05006344 iter->cpu_file = RING_BUFFER_ALL_CPUS;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006345 iter->trace_buffer = &global_trace.trace_buffer;
Cody P Schaferb2f974d2013-10-23 11:49:57 -07006346
6347 if (iter->trace && iter->trace->open)
6348 iter->trace->open(iter);
6349
6350 /* Annotate start of buffers if we had overruns */
6351 if (ring_buffer_overruns(iter->trace_buffer->buffer))
6352 iter->iter_flags |= TRACE_FILE_ANNOTATE;
6353
6354 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6355 if (trace_clocks[iter->tr->clock_id].in_ns)
6356 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
Jason Wessel955b61e2010-08-05 09:22:23 -05006357}
6358
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04006359void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006360{
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006361 /* use static because iter can be a bit big for the stack */
6362 static struct trace_iterator iter;
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04006363 static atomic_t dump_running;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01006364 unsigned int old_userobj;
Steven Rostedtd7690412008-10-01 00:29:53 -04006365 unsigned long flags;
6366 int cnt = 0, cpu;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006367
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04006368 /* Only allow one dump user at a time. */
6369 if (atomic_inc_return(&dump_running) != 1) {
6370 atomic_dec(&dump_running);
6371 return;
Steven Rostedte0a413f2011-09-29 21:26:16 -04006372 }
6373
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04006374 /*
6375 * Always turn off tracing when we dump.
6376 * We don't need to show trace output of what happens
6377 * between multiple crashes.
6378 *
6379 * If the user does a sysrq-z, then they can re-enable
6380 * tracing with echo 1 > tracing_on.
6381 */
6382 tracing_off();
6383
6384 local_irq_save(flags);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006385
Jovi Zhang38dbe0b2013-01-25 18:03:07 +08006386 /* Simulate the iterator */
Jason Wessel955b61e2010-08-05 09:22:23 -05006387 trace_init_global_iter(&iter);
6388
Steven Rostedtd7690412008-10-01 00:29:53 -04006389 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006390 atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
Steven Rostedtd7690412008-10-01 00:29:53 -04006391 }
6392
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01006393 old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
6394
Török Edwinb54d3de2008-11-22 13:28:48 +02006395 /* don't look at user memory in panic mode */
6396 trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
6397
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02006398 switch (oops_dump_mode) {
6399 case DUMP_ALL:
Steven Rostedtae3b5092013-01-23 15:22:59 -05006400 iter.cpu_file = RING_BUFFER_ALL_CPUS;
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02006401 break;
6402 case DUMP_ORIG:
6403 iter.cpu_file = raw_smp_processor_id();
6404 break;
6405 case DUMP_NONE:
6406 goto out_enable;
6407 default:
6408 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
Steven Rostedtae3b5092013-01-23 15:22:59 -05006409 iter.cpu_file = RING_BUFFER_ALL_CPUS;
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02006410 }
6411
6412 printk(KERN_TRACE "Dumping ftrace buffer:\n");
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006413
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04006414 /* Did function tracer already get disabled? */
6415 if (ftrace_is_dead()) {
6416 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
6417 printk("# MAY BE MISSING FUNCTION EVENTS\n");
6418 }
6419
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006420 /*
6421 * We need to stop all tracing on all CPUS to read the
6422 * the next buffer. This is a bit expensive, but is
6423 * not done often. We fill all what we can read,
6424 * and then release the locks again.
6425 */
6426
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006427 while (!trace_empty(&iter)) {
6428
6429 if (!cnt)
6430 printk(KERN_TRACE "---------------------------------\n");
6431
6432 cnt++;
6433
6434 /* reset all but tr, trace, and overruns */
6435 memset(&iter.seq, 0,
6436 sizeof(struct trace_iterator) -
6437 offsetof(struct trace_iterator, seq));
6438 iter.iter_flags |= TRACE_FILE_LAT_FMT;
6439 iter.pos = -1;
6440
Jason Wessel955b61e2010-08-05 09:22:23 -05006441 if (trace_find_next_entry_inc(&iter) != NULL) {
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08006442 int ret;
6443
6444 ret = print_trace_line(&iter);
6445 if (ret != TRACE_TYPE_NO_CONSUME)
6446 trace_consume(&iter);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006447 }
Steven Rostedtb892e5c2012-03-01 22:06:48 -05006448 touch_nmi_watchdog();
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006449
6450 trace_printk_seq(&iter.seq);
6451 }
6452
6453 if (!cnt)
6454 printk(KERN_TRACE " (ftrace buffer empty)\n");
6455 else
6456 printk(KERN_TRACE "---------------------------------\n");
6457
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02006458 out_enable:
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04006459 trace_flags |= old_userobj;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01006460
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04006461 for_each_tracing_cpu(cpu) {
6462 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01006463 }
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04006464 atomic_dec(&dump_running);
Steven Rostedtcd891ae2009-04-28 11:39:34 -04006465 local_irq_restore(flags);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006466}
Paul E. McKenneya8eecf22011-10-02 11:01:15 -07006467EXPORT_SYMBOL_GPL(ftrace_dump);
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01006468
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006469__init static int tracer_alloc_buffers(void)
6470{
Steven Rostedt73c51622009-03-11 13:42:01 -04006471 int ring_buf_size;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10306472 int ret = -ENOMEM;
6473
David Sharp750912f2010-12-08 13:46:47 -08006474
Rusty Russell9e01c1b2009-01-01 10:12:22 +10306475 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
6476 goto out;
6477
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07006478 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
Rusty Russell9e01c1b2009-01-01 10:12:22 +10306479 goto out_free_buffer_mask;
6480
Steven Rostedt07d777f2011-09-22 14:01:55 -04006481 /* Only allocate trace_printk buffers if a trace_printk exists */
6482 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
Steven Rostedt81698832012-10-11 10:15:05 -04006483 /* Must be called before global_trace.buffer is allocated */
Steven Rostedt07d777f2011-09-22 14:01:55 -04006484 trace_printk_init_buffers();
6485
Steven Rostedt73c51622009-03-11 13:42:01 -04006486 /* To save memory, keep the ring buffer size to its minimum */
6487 if (ring_buffer_expanded)
6488 ring_buf_size = trace_buf_size;
6489 else
6490 ring_buf_size = 1;
6491
Rusty Russell9e01c1b2009-01-01 10:12:22 +10306492 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07006493 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006494
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006495 raw_spin_lock_init(&global_trace.start_lock);
6496
Steven Rostedtab464282008-05-12 21:21:00 +02006497 /* TODO: make the number of buffers hot pluggable with CPUS */
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006498 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04006499 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
6500 WARN_ON(1);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10306501 goto out_free_cpumask;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04006502 }
Steven Rostedta7603ff2012-08-06 16:24:11 -04006503
Steven Rostedt499e5472012-02-22 15:50:28 -05006504 if (global_trace.buffer_disabled)
6505 tracing_off();
Steven Rostedt3928a8a2008-09-29 23:02:41 -04006506
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006507 trace_init_cmdlines();
6508
Steven Rostedt (Red Hat)ca164312013-05-23 11:51:10 -04006509 /*
6510 * register_tracer() might reference current_trace, so it
6511 * needs to be set before we register anything. This is
6512 * just a bootstrap of current_trace anyway.
6513 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006514 global_trace.current_trace = &nop_trace;
6515
Steven Rostedt (Red Hat)ca164312013-05-23 11:51:10 -04006516 register_tracer(&nop_trace);
6517
Steven Rostedt60a11772008-05-12 21:20:44 +02006518 /* All seems OK, enable tracing */
6519 tracing_disabled = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04006520
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006521 atomic_notifier_chain_register(&panic_notifier_list,
6522 &trace_panic_notifier);
6523
6524 register_die_notifier(&trace_die_notifier);
Frederic Weisbecker2fc1dfb2009-03-16 01:45:03 +01006525
Steven Rostedtae63b312012-05-03 23:09:03 -04006526 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
6527
6528 INIT_LIST_HEAD(&global_trace.systems);
6529 INIT_LIST_HEAD(&global_trace.events);
6530 list_add(&global_trace.list, &ftrace_trace_arrays);
6531
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04006532 while (trace_boot_options) {
6533 char *option;
6534
6535 option = strsep(&trace_boot_options, ",");
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006536 trace_set_options(&global_trace, option);
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04006537 }
6538
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006539 register_snapshot_cmd();
6540
Frederic Weisbecker2fc1dfb2009-03-16 01:45:03 +01006541 return 0;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006542
Rusty Russell9e01c1b2009-01-01 10:12:22 +10306543out_free_cpumask:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006544 free_percpu(global_trace.trace_buffer.data);
6545#ifdef CONFIG_TRACER_MAX_TRACE
6546 free_percpu(global_trace.max_buffer.data);
6547#endif
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07006548 free_cpumask_var(global_trace.tracing_cpumask);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10306549out_free_buffer_mask:
6550 free_cpumask_var(tracing_buffer_mask);
6551out:
6552 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006553}
Steven Rostedtb2821ae2009-02-02 21:38:32 -05006554
6555__init static int clear_boot_tracer(void)
6556{
6557 /*
6558 * The default tracer at boot buffer is an init section.
6559 * This function is called in lateinit. If we did not
6560 * find the boot tracer, then clear it out, to prevent
6561 * later registration from accessing the buffer that is
6562 * about to be freed.
6563 */
6564 if (!default_bootup_tracer)
6565 return 0;
6566
6567 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
6568 default_bootup_tracer);
6569 default_bootup_tracer = NULL;
6570
6571 return 0;
6572}
6573
Frédéric Weisbeckerb5ad3842008-09-23 11:34:32 +01006574early_initcall(tracer_alloc_buffers);
6575fs_initcall(tracer_init_debugfs);
Steven Rostedtb2821ae2009-02-02 21:38:32 -05006576late_initcall(clear_boot_tracer);