/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although some concurrent
 * insertions into the ring buffer, such as trace_printk, could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly	tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set it to 1 to dump the buffers of all CPUs
 * Set it to 2 to dump only the buffer of the CPU that triggered the oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning=", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);


unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array	global_trace;

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}

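/*
 * Usage sketch (added for illustration, not part of the original file):
 * a caller that wants to keep a trace_array alive across an operation
 * pairs the two helpers above, e.g.
 *
 *	if (trace_array_get(tr) < 0)
 *		return -ENODEV;		(tr was already being removed)
 *	do_something_with(tr);		(hypothetical helper)
 *	trace_array_put(tr);
 *
 * The ref count is protected by trace_types_lock, so get/put may sleep
 * and must not be called from atomic context.
 */
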
int filter_check_discard(struct ftrace_event_file *file, void *rec,
			 struct ring_buffer *buffer,
			 struct ring_buffer_event *event)
{
	if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(file->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(filter_check_discard);

int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(call_filter_check_discard);

cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If a dump on oops happens, it is much appreciated not to
 * have to wait for all that output. In any case, this is
 * configurable both at boot time and at run time.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * The ring buffer serializes readers, but that is only low level
 * protection.  The validity of the events (which are returned by
 * ring_buffer_peek(), etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the event producer.
 *   B) The page of the consumed events may become a page for
 *      splice_read, and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different CPU
 * ring buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */
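
/*
 * Usage sketch (added for illustration, not part of the original file):
 * a reader of cpu 2's buffer and a reader of all buffers would nest the
 * primitives defined below as
 *
 *	trace_access_lock(2);			(read side, per-cpu)
 *	... consume events from cpu 2 ...
 *	trace_access_unlock(2);
 *
 *	trace_access_lock(RING_BUFFER_ALL_CPUS);  (exclusive, all cpus)
 *	... consume events from every cpu ...
 *	trace_access_unlock(RING_BUFFER_ALL_CPUS);
 *
 * On SMP this maps to a rwsem (taken for write in the ALL_CPUS case)
 * plus a per-cpu mutex; on UP it degenerates to a single mutex.
 */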

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;

static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, preempt_count());
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, preempt_count());
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip  = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
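
/*
 * Example (added sketch, not from the original file): a module that
 * wants to capture the trace leading up to a rare condition could do
 *
 *	if (unlikely(saw_the_bug))	(hypothetical condition)
 *		tracing_snapshot();
 *
 * after making sure the snapshot buffer exists, either by calling
 * tracing_alloc_snapshot() from sleepable context or via
 * "echo 1 > /sys/kernel/debug/tracing/snapshot".
 */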

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

void free_snapshot(struct trace_array *tr)
{
605 * We don't free the ring buffer. instead, resize it because
606 * The max_tr ring buffer has some state (e.g. ring->clock) and
607 * we want preserve it.
608 */
609 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
610 set_buffer_entries(&tr->max_buffer, 1);
611 tracing_reset_online_cpus(&tr->max_buffer);
612 tr->allocated_snapshot = false;
613}
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500614
615/**
Tom Zanussi93e31ff2013-10-24 08:59:26 -0500616 * tracing_alloc_snapshot - allocate snapshot buffer.
617 *
618 * This only allocates the snapshot buffer if it isn't already
619 * allocated - it doesn't also take a snapshot.
620 *
621 * This is meant to be used in cases where the snapshot buffer needs
622 * to be set up for events that can't sleep but need to be able to
623 * trigger a snapshot.
624 */
625int tracing_alloc_snapshot(void)
626{
627 struct trace_array *tr = &global_trace;
628 int ret;
629
630 ret = alloc_snapshot(tr);
631 WARN_ON(ret < 0);
632
633 return ret;
634}
635EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
636
637/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);
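
/*
 * Example (added note, not in the original file): since set_buf_size()
 * uses memparse(), the usual K/M/G suffixes work on the command line,
 * e.g. "trace_buf_size=10M" requests roughly ten megabytes for each
 * per-CPU buffer (the ring buffer rounds the value to full pages).
 */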

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	"record-cmd",
	"overwrite",
	"disable_on_free",
	"irq-info",
	"markers",
	"function-trace",
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,	"local",	1 },
	{ trace_clock_global,	"global",	1 },
	{ trace_clock_counter,	"counter",	0 },
	{ trace_clock_jiffies,	"uptime",	1 },
	{ trace_clock,		"perf",		1 },
	ARCH_TRACE_CLOCKS
};

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}
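
/*
 * Lifecycle sketch (added for illustration, not part of the original
 * file):
 *
 *	struct trace_parser parser;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *	... feed user input through trace_get_user() below ...
 *	trace_parser_put(&parser);
 *
 * Note the unusual convention: trace_parser_get_init() returns 1, not
 * a -errno, when its kmalloc() fails.
 */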

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * If the parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
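
/*
 * Caller sketch (added for illustration, not from the original file):
 * a debugfs write handler that accepts a space-separated list of names
 * would typically loop like
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser) && !parser.cont)
 *		... act on the NUL-terminated token in parser.buffer ...
 *
 * which is roughly how the ftrace filter files consume their input;
 * the actual call sites live elsewhere (e.g. kernel/trace/ftrace.c).
 */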

ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
	int len;
	int ret;

	if (!cnt)
		return 0;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
	if (ret == cnt)
		return -EFAULT;

	cnt -= ret;

	s->readpos += cnt;
	return cnt;
}

static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->readpos, cnt);

	s->readpos += cnt;
	return cnt;
}

/*
 * ftrace_max_lock is used to protect the swapping of buffers
 * when taking a max snapshot. The buffers themselves are
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
 * This is defined as an arch_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 *
 * It is also used in other places outside the update_max_tr
 * so it needs to be defined outside of the
 * CONFIG_TRACER_MAX_TRACE.
 */
static arch_spinlock_t ftrace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&ftrace_max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&ftrace_max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static void default_wait_pipe(struct trace_iterator *iter)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return;

	ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;
	if (!type->wait_pipe)
		type->wait_pipe = default_wait_pipe;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}

void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}

#define SAVED_CMDLINES 128
#define NO_CMDLINE_MAP UINT_MAX
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
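
/*
 * Layout note (added for illustration, not part of the original file):
 * the comm cache is a pair of maps.  map_pid_to_cmdline[] maps a pid to
 * one of the SAVED_CMDLINES slots, and map_cmdline_to_pid[] maps that
 * slot back to the pid that owns it, so a slot recycled when cmdline_idx
 * wraps around can invalidate the stale pid entry.  Roughly:
 *
 *	idx = map_pid_to_cmdline[tsk->pid];
 *	if (idx == NO_CMDLINE_MAP)
 *		idx = (cmdline_idx + 1) % SAVED_CMDLINES;
 *	... copy tsk->comm into saved_cmdlines[idx] ...	(conceptually)
 *
 * The real update runs under trace_cmdline_lock elsewhere in this file.
 */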
Steven Rostedt25b0b442008-05-12 21:21:00 +02001313
Steven Rostedt25b0b442008-05-12 21:21:00 +02001314/* temporary disable recording */
Hannes Eder4fd27352009-02-10 19:44:12 +01001315static atomic_t trace_record_cmdline_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001316
1317static void trace_init_cmdlines(void)
1318{
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001319 memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
1320 memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001321 cmdline_idx = 0;
1322}
1323
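/*
 * Sketch of the cmdline cache invariant (illustration only, helper name
 * hypothetical): pids index into a fixed ring of SAVED_CMDLINES comm
 * slots. Locking is omitted here; the real reader, trace_find_cmdline()
 * below, takes trace_cmdline_lock. Assumes pid <= PID_MAX_DEFAULT.
 */
static inline const char *example_comm_for_pid(unsigned pid)
{
	unsigned idx = map_pid_to_cmdline[pid];	/* NO_CMDLINE_MAP if unknown */

	return (idx == NO_CMDLINE_MAP) ? "<...>" : saved_cmdlines[idx];
}
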
Carsten Emdeb5130b12009-09-13 01:43:07 +02001324int is_tracing_stopped(void)
1325{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001326 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02001327}
1328
Steven Rostedt0f048702008-11-05 16:05:44 -05001329/**
1330 * tracing_start - quick start of the tracer
1331 *
1332 * If tracing is enabled but was stopped by tracing_stop,
1333 * this will start the tracer back up.
1334 */
1335void tracing_start(void)
1336{
1337 struct ring_buffer *buffer;
1338 unsigned long flags;
1339
1340 if (tracing_disabled)
1341 return;
1342
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001343 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1344 if (--global_trace.stop_count) {
1345 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05001346 /* Someone screwed up their debugging */
1347 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001348 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05001349 }
Steven Rostedt0f048702008-11-05 16:05:44 -05001350 goto out;
1351 }
1352
Steven Rostedta2f80712010-03-12 19:56:00 -05001353 /* Prevent the buffers from switching */
1354 arch_spin_lock(&ftrace_max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05001355
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001356 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001357 if (buffer)
1358 ring_buffer_record_enable(buffer);
1359
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001360#ifdef CONFIG_TRACER_MAX_TRACE
1361 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001362 if (buffer)
1363 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001364#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001365
Steven Rostedta2f80712010-03-12 19:56:00 -05001366 arch_spin_unlock(&ftrace_max_lock);
1367
Steven Rostedt0f048702008-11-05 16:05:44 -05001368 ftrace_start();
1369 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001370 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1371}
1372
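/*
 * Usage sketch (hypothetical caller): tracing_stop()/tracing_start()
 * nest via stop_count, so paired calls are safe even if another path
 * has already stopped tracing.
 */
static inline void example_freeze_trace_window(void)
{
	tracing_stop();		/* recording off; nested stops just count up */
	/* ... inspect or copy buffer contents here ... */
	tracing_start();	/* recording resumes once all stops are undone */
}
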
1373static void tracing_start_tr(struct trace_array *tr)
1374{
1375 struct ring_buffer *buffer;
1376 unsigned long flags;
1377
1378 if (tracing_disabled)
1379 return;
1380
1381 /* If global, we need to also start the max tracer */
1382 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1383 return tracing_start();
1384
1385 raw_spin_lock_irqsave(&tr->start_lock, flags);
1386
1387 if (--tr->stop_count) {
1388 if (tr->stop_count < 0) {
1389 /* Someone screwed up their debugging */
1390 WARN_ON_ONCE(1);
1391 tr->stop_count = 0;
1392 }
1393 goto out;
1394 }
1395
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001396 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001397 if (buffer)
1398 ring_buffer_record_enable(buffer);
1399
1400 out:
1401 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001402}
1403
1404/**
1405 * tracing_stop - quick stop of the tracer
1406 *
1407 * Light weight way to stop tracing. Use in conjunction with
1408 * tracing_start.
1409 */
1410void tracing_stop(void)
1411{
1412 struct ring_buffer *buffer;
1413 unsigned long flags;
1414
1415 ftrace_stop();
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001416 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1417 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05001418 goto out;
1419
Steven Rostedta2f80712010-03-12 19:56:00 -05001420 /* Prevent the buffers from switching */
1421 arch_spin_lock(&ftrace_max_lock);
1422
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001423 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001424 if (buffer)
1425 ring_buffer_record_disable(buffer);
1426
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001427#ifdef CONFIG_TRACER_MAX_TRACE
1428 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001429 if (buffer)
1430 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001431#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001432
Steven Rostedta2f80712010-03-12 19:56:00 -05001433 arch_spin_unlock(&ftrace_max_lock);
1434
Steven Rostedt0f048702008-11-05 16:05:44 -05001435 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001436 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1437}
1438
1439static void tracing_stop_tr(struct trace_array *tr)
1440{
1441 struct ring_buffer *buffer;
1442 unsigned long flags;
1443
1444 /* If global, we need to also stop the max tracer */
1445 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1446 return tracing_stop();
1447
1448 raw_spin_lock_irqsave(&tr->start_lock, flags);
1449 if (tr->stop_count++)
1450 goto out;
1451
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001452 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001453 if (buffer)
1454 ring_buffer_record_disable(buffer);
1455
1456 out:
1457 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001458}
1459
Ingo Molnare309b412008-05-12 21:20:51 +02001460void trace_stop_cmdline_recording(void);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001461
Ingo Molnare309b412008-05-12 21:20:51 +02001462static void trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001463{
Carsten Emdea635cf02009-03-18 09:00:41 +01001464 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001465
1466 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1467 return;
1468
1469 /*
1470 * It's not the end of the world if we don't get
1471 * the lock, but we also don't want to spin
1472 * nor do we want to disable interrupts,
1473 * so if we miss here, then better luck next time.
1474 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001475 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001476 return;
1477
1478 idx = map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001479 if (idx == NO_CMDLINE_MAP) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001480 idx = (cmdline_idx + 1) % SAVED_CMDLINES;
1481
Carsten Emdea635cf02009-03-18 09:00:41 +01001482 /*
1483 * Check whether the cmdline buffer at idx has a pid
1484 * mapped. We are going to overwrite that entry so we
1485 * need to clear the map_pid_to_cmdline. Otherwise we
1486 * would read the new comm for the old pid.
1487 */
1488 pid = map_cmdline_to_pid[idx];
1489 if (pid != NO_CMDLINE_MAP)
1490 map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001491
Carsten Emdea635cf02009-03-18 09:00:41 +01001492 map_cmdline_to_pid[idx] = tsk->pid;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001493 map_pid_to_cmdline[tsk->pid] = idx;
1494
1495 cmdline_idx = idx;
1496 }
1497
1498 memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
1499
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001500 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001501}
1502
Steven Rostedt4ca53082009-03-16 19:20:15 -04001503void trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001504{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001505 unsigned map;
1506
Steven Rostedt4ca53082009-03-16 19:20:15 -04001507 if (!pid) {
1508 strcpy(comm, "<idle>");
1509 return;
1510 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001511
Steven Rostedt74bf4072010-01-25 15:11:53 -05001512 if (WARN_ON_ONCE(pid < 0)) {
1513 strcpy(comm, "<XXX>");
1514 return;
1515 }
1516
Steven Rostedt4ca53082009-03-16 19:20:15 -04001517 if (pid > PID_MAX_DEFAULT) {
1518 strcpy(comm, "<...>");
1519 return;
1520 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001521
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001522 preempt_disable();
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001523 arch_spin_lock(&trace_cmdline_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001524 map = map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001525 if (map != NO_CMDLINE_MAP)
1526 strcpy(comm, saved_cmdlines[map]);
1527 else
1528 strcpy(comm, "<...>");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001529
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001530 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001531 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001532}
1533
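/*
 * Usage sketch (illustration only, helper name hypothetical): the
 * output buffer must be at least TASK_COMM_LEN bytes, since
 * trace_find_cmdline() strcpy()s into it.
 */
static inline void example_show_comm(int pid)
{
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(pid, comm);
	pr_info("pid %d comm %s\n", pid, comm);
}
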
Ingo Molnare309b412008-05-12 21:20:51 +02001534void tracing_record_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001535{
Steven Rostedt0fb96562012-05-11 14:25:30 -04001536 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001537 return;
1538
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001539 if (!__this_cpu_read(trace_cmdline_save))
1540 return;
1541
1542 __this_cpu_write(trace_cmdline_save, false);
1543
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001544 trace_save_cmdline(tsk);
1545}
1546
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03001547void
Steven Rostedt38697052008-10-01 13:14:09 -04001548tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1549 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001550{
1551 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001552
Steven Rostedt777e2082008-09-29 23:02:42 -04001553 entry->preempt_count = pc & 0xff;
1554 entry->pid = (tsk) ? tsk->pid : 0;
1555 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04001556#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04001557 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04001558#else
1559 TRACE_FLAG_IRQS_NOSUPPORT |
1560#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001561 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1562 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02001563 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1564 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001565}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02001566EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001567
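/*
 * Calling-convention sketch (illustrative, helper name hypothetical):
 * callers capture the irq flags and preempt count themselves, as the
 * printk paths further down do, and pass both in.
 */
static inline void example_fill_entry(struct trace_entry *ent)
{
	unsigned long flags;
	int pc;

	local_save_flags(flags);
	pc = preempt_count();
	tracing_generic_entry_update(ent, flags, pc);
}
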
Steven Rostedte77405a2009-09-02 14:17:06 -04001568struct ring_buffer_event *
1569trace_buffer_lock_reserve(struct ring_buffer *buffer,
1570 int type,
1571 unsigned long len,
1572 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001573{
1574 struct ring_buffer_event *event;
1575
Steven Rostedte77405a2009-09-02 14:17:06 -04001576 event = ring_buffer_lock_reserve(buffer, len);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001577 if (event != NULL) {
1578 struct trace_entry *ent = ring_buffer_event_data(event);
1579
1580 tracing_generic_entry_update(ent, flags, pc);
1581 ent->type = type;
1582 }
1583
1584 return event;
1585}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001586
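/*
 * Reserve/fill/commit sketch (illustration only, mirroring
 * trace_function() below; helper name hypothetical): a NULL return
 * means the buffer is off or full, and the event is simply dropped.
 */
static inline void example_record_fn(struct ring_buffer *buffer,
				     unsigned long ip, unsigned long pip,
				     unsigned long flags, int pc)
{
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = pip;
	ring_buffer_unlock_commit(buffer, event);
}
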
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001587void
1588__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1589{
1590 __this_cpu_write(trace_cmdline_save, true);
1591 ring_buffer_unlock_commit(buffer, event);
1592}
1593
Steven Rostedte77405a2009-09-02 14:17:06 -04001594static inline void
1595__trace_buffer_unlock_commit(struct ring_buffer *buffer,
1596 struct ring_buffer_event *event,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001597 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001598{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001599 __buffer_unlock_commit(buffer, event);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001600
Steven Rostedte77405a2009-09-02 14:17:06 -04001601 ftrace_trace_stack(buffer, flags, 6, pc);
1602 ftrace_trace_userstack(buffer, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001603}
1604
Steven Rostedte77405a2009-09-02 14:17:06 -04001605void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1606 struct ring_buffer_event *event,
1607 unsigned long flags, int pc)
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001608{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001609 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001610}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001611EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001612
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001613static struct ring_buffer *temp_buffer;
1614
Steven Rostedtef5580d2009-02-27 19:38:04 -05001615struct ring_buffer_event *
Steven Rostedtccb469a2012-08-02 10:32:10 -04001616trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1617 struct ftrace_event_file *ftrace_file,
1618 int type, unsigned long len,
1619 unsigned long flags, int pc)
1620{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001621 struct ring_buffer_event *entry;
1622
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001623 *current_rb = ftrace_file->tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001624 entry = trace_buffer_lock_reserve(*current_rb,
Steven Rostedtccb469a2012-08-02 10:32:10 -04001625 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001626 /*
1627 * If tracing is off, but we have triggers enabled,
1628 * we still need to look at the event data. Use the temp_buffer
1629 * to store the trace event for the trigger to use. It's recursion
1630 * safe and will not be recorded anywhere.
1631 */
1632 if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
1633 *current_rb = temp_buffer;
1634 entry = trace_buffer_lock_reserve(*current_rb,
1635 type, len, flags, pc);
1636 }
1637 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04001638}
1639EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1640
1641struct ring_buffer_event *
Steven Rostedte77405a2009-09-02 14:17:06 -04001642trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1643 int type, unsigned long len,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001644 unsigned long flags, int pc)
1645{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001646 *current_rb = global_trace.trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04001647 return trace_buffer_lock_reserve(*current_rb,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001648 type, len, flags, pc);
1649}
Steven Rostedt94487d62009-05-05 19:22:53 -04001650EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001651
Steven Rostedte77405a2009-09-02 14:17:06 -04001652void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1653 struct ring_buffer_event *event,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001654 unsigned long flags, int pc)
1655{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001656 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001657}
Steven Rostedt94487d62009-05-05 19:22:53 -04001658EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001659
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001660void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1661 struct ring_buffer_event *event,
1662 unsigned long flags, int pc,
1663 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001664{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001665 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001666
1667 ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1668 ftrace_trace_userstack(buffer, flags, pc);
1669}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001670EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001671
Steven Rostedte77405a2009-09-02 14:17:06 -04001672void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1673 struct ring_buffer_event *event)
Steven Rostedt77d9f462009-04-02 01:16:59 -04001674{
Steven Rostedte77405a2009-09-02 14:17:06 -04001675 ring_buffer_discard_commit(buffer, event);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001676}
Steven Rostedt12acd472009-04-17 16:01:56 -04001677EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001678
Ingo Molnare309b412008-05-12 21:20:51 +02001679void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001680trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04001681 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1682 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001683{
Tom Zanussie1112b42009-03-31 00:48:49 -05001684 struct ftrace_event_call *call = &event_function;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001685 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001686 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001687 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001688
Steven Rostedtd7690412008-10-01 00:29:53 -04001689 /* If we are reading the ring buffer, don't trace */
Rusty Russelldd17c8f2009-10-29 22:34:15 +09001690 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
Steven Rostedtd7690412008-10-01 00:29:53 -04001691 return;
1692
Steven Rostedte77405a2009-09-02 14:17:06 -04001693 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001694 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001695 if (!event)
1696 return;
1697 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04001698 entry->ip = ip;
1699 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05001700
Tom Zanussif306cc82013-10-24 08:34:17 -05001701 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001702 __buffer_unlock_commit(buffer, event);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001703}
1704
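/*
 * Caller sketch (hypothetical probe): a function tracer callback
 * gathers flags and preempt count, then hands the ip pair to
 * trace_function().
 */
static inline void example_func_probe(struct trace_array *tr,
				      unsigned long ip, unsigned long parent_ip)
{
	unsigned long flags;
	int pc;

	local_save_flags(flags);
	pc = preempt_count();
	trace_function(tr, ip, parent_ip, flags, pc);
}
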
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001705#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001706
1707#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1708struct ftrace_stack {
1709 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1710};
1711
1712static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1713static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1714
Steven Rostedte77405a2009-09-02 14:17:06 -04001715static void __ftrace_trace_stack(struct ring_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05001716 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001717 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02001718{
Tom Zanussie1112b42009-03-31 00:48:49 -05001719 struct ftrace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001720 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001721 struct stack_entry *entry;
Ingo Molnar86387f72008-05-12 21:20:51 +02001722 struct stack_trace trace;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001723 int use_stack;
1724 int size = FTRACE_STACK_ENTRIES;
Ingo Molnar86387f72008-05-12 21:20:51 +02001725
1726 trace.nr_entries = 0;
Ingo Molnar86387f72008-05-12 21:20:51 +02001727 trace.skip = skip;
Ingo Molnar86387f72008-05-12 21:20:51 +02001728
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001729 /*
1730 * Since events can happen in NMIs there's no safe way to
1731 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1732 * or NMI comes in, it will just have to use the default
1733 * FTRACE_STACK_ENTRIES-sized stack.
1734 */
1735 preempt_disable_notrace();
1736
Shan Wei82146522012-11-19 13:21:01 +08001737 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001738 /*
1739 * We don't need any atomic variables, just a barrier.
1740 * If an interrupt comes in, we don't care, because it would
1741 * have exited and put the counter back to what we want.
1742 * We just need a barrier to keep gcc from moving things
1743 * around.
1744 */
1745 barrier();
1746 if (use_stack == 1) {
1747 trace.entries = &__get_cpu_var(ftrace_stack).calls[0];
1748 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1749
1750 if (regs)
1751 save_stack_trace_regs(regs, &trace);
1752 else
1753 save_stack_trace(&trace);
1754
1755 if (trace.nr_entries > size)
1756 size = trace.nr_entries;
1757 } else
1758 /* From now on, use_stack is a boolean */
1759 use_stack = 0;
1760
1761 size *= sizeof(unsigned long);
1762
1763 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1764 sizeof(*entry) + size, flags, pc);
1765 if (!event)
1766 goto out;
1767 entry = ring_buffer_event_data(event);
1768
1769 memset(&entry->caller, 0, size);
1770
1771 if (use_stack)
1772 memcpy(&entry->caller, trace.entries,
1773 trace.nr_entries * sizeof(unsigned long));
1774 else {
1775 trace.max_entries = FTRACE_STACK_ENTRIES;
1776 trace.entries = entry->caller;
1777 if (regs)
1778 save_stack_trace_regs(regs, &trace);
1779 else
1780 save_stack_trace(&trace);
1781 }
1782
1783 entry->size = trace.nr_entries;
1784
Tom Zanussif306cc82013-10-24 08:34:17 -05001785 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001786 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001787
1788 out:
1789 /* Again, don't let gcc optimize things here */
1790 barrier();
Shan Wei82146522012-11-19 13:21:01 +08001791 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001792 preempt_enable_notrace();
1793
Ingo Molnarf0a920d2008-05-12 21:20:47 +02001794}
1795
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001796void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1797 int skip, int pc, struct pt_regs *regs)
1798{
1799 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1800 return;
1801
1802 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1803}
1804
Steven Rostedte77405a2009-09-02 14:17:06 -04001805void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1806 int skip, int pc)
Steven Rostedt53614992009-01-15 19:12:40 -05001807{
1808 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1809 return;
1810
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001811 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
Steven Rostedt53614992009-01-15 19:12:40 -05001812}
1813
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001814void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1815 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04001816{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001817 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
Steven Rostedt38697052008-10-01 13:14:09 -04001818}
1819
Steven Rostedt03889382009-12-11 09:48:22 -05001820/**
1821 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001822 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05001823 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001824void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05001825{
1826 unsigned long flags;
1827
1828 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05001829 return;
Steven Rostedt03889382009-12-11 09:48:22 -05001830
1831 local_save_flags(flags);
1832
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001833 /*
1834 * Skip 3 more, seems to get us at the caller of
1835 * this function.
1836 */
1837 skip += 3;
1838 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1839 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05001840}
1841
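/*
 * Usage sketch (illustration only, helper name hypothetical): skip==0
 * records from the caller of trace_dump_stack() itself, thanks to the
 * internal skip adjustment above.
 */
static inline void example_dump_here(void)
{
	trace_dump_stack(0);
}
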
Steven Rostedt91e86e52010-11-10 12:56:12 +01001842static DEFINE_PER_CPU(int, user_stack_count);
1843
Steven Rostedte77405a2009-09-02 14:17:06 -04001844void
1845ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02001846{
Tom Zanussie1112b42009-03-31 00:48:49 -05001847 struct ftrace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02001848 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02001849 struct userstack_entry *entry;
1850 struct stack_trace trace;
Török Edwin02b67512008-11-22 13:28:47 +02001851
1852 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1853 return;
1854
Steven Rostedtb6345872010-03-12 20:03:30 -05001855 /*
1856 * NMIs can not handle page faults, even with fix ups.
1857 * The save user stack can (and often does) fault.
1858 */
1859 if (unlikely(in_nmi()))
1860 return;
1861
Steven Rostedt91e86e52010-11-10 12:56:12 +01001862 /*
1863 * prevent recursion, since the user stack tracing may
1864 * trigger other kernel events.
1865 */
1866 preempt_disable();
1867 if (__this_cpu_read(user_stack_count))
1868 goto out;
1869
1870 __this_cpu_inc(user_stack_count);
1871
Steven Rostedte77405a2009-09-02 14:17:06 -04001872 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001873 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02001874 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08001875 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02001876 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02001877
Steven Rostedt48659d32009-09-11 11:36:23 -04001878 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02001879 memset(&entry->caller, 0, sizeof(entry->caller));
1880
1881 trace.nr_entries = 0;
1882 trace.max_entries = FTRACE_STACK_ENTRIES;
1883 trace.skip = 0;
1884 trace.entries = entry->caller;
1885
1886 save_stack_trace_user(&trace);
Tom Zanussif306cc82013-10-24 08:34:17 -05001887 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001888 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001889
Li Zefan1dbd1952010-12-09 15:47:56 +08001890 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01001891 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001892 out:
1893 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02001894}
1895
Hannes Eder4fd27352009-02-10 19:44:12 +01001896#ifdef UNUSED
1897static void __trace_userstack(struct trace_array *tr, unsigned long flags)
Török Edwin02b67512008-11-22 13:28:47 +02001898{
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001899 ftrace_trace_userstack(tr, flags, preempt_count());
Török Edwin02b67512008-11-22 13:28:47 +02001900}
Hannes Eder4fd27352009-02-10 19:44:12 +01001901#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02001902
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001903#endif /* CONFIG_STACKTRACE */
1904
Steven Rostedt07d777f2011-09-22 14:01:55 -04001905/* created for use with alloc_percpu */
1906struct trace_buffer_struct {
1907 char buffer[TRACE_BUF_SIZE];
1908};
1909
1910static struct trace_buffer_struct *trace_percpu_buffer;
1911static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1912static struct trace_buffer_struct *trace_percpu_irq_buffer;
1913static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1914
1915/*
1916 * The buffer used is dependent on the context. There is a per cpu
1917 * buffer for normal context, softirq context, hard irq context and
1918 * for NMI context. This allows for lockless recording.
1919 *
1920 * Note, if the buffers failed to be allocated, then this returns NULL
1921 */
1922static char *get_trace_buf(void)
1923{
1924 struct trace_buffer_struct *percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04001925
1926 /*
1927 * If we have allocated per cpu buffers, then we do not
1928 * need to do any locking.
1929 */
1930 if (in_nmi())
1931 percpu_buffer = trace_percpu_nmi_buffer;
1932 else if (in_irq())
1933 percpu_buffer = trace_percpu_irq_buffer;
1934 else if (in_softirq())
1935 percpu_buffer = trace_percpu_sirq_buffer;
1936 else
1937 percpu_buffer = trace_percpu_buffer;
1938
1939 if (!percpu_buffer)
1940 return NULL;
1941
Shan Weid8a03492012-11-13 09:53:04 +08001942 return this_cpu_ptr(&percpu_buffer->buffer[0]);
Steven Rostedt07d777f2011-09-22 14:01:55 -04001943}
1944
1945static int alloc_percpu_trace_buffer(void)
1946{
1947 struct trace_buffer_struct *buffers;
1948 struct trace_buffer_struct *sirq_buffers;
1949 struct trace_buffer_struct *irq_buffers;
1950 struct trace_buffer_struct *nmi_buffers;
1951
1952 buffers = alloc_percpu(struct trace_buffer_struct);
1953 if (!buffers)
1954 goto err_warn;
1955
1956 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1957 if (!sirq_buffers)
1958 goto err_sirq;
1959
1960 irq_buffers = alloc_percpu(struct trace_buffer_struct);
1961 if (!irq_buffers)
1962 goto err_irq;
1963
1964 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
1965 if (!nmi_buffers)
1966 goto err_nmi;
1967
1968 trace_percpu_buffer = buffers;
1969 trace_percpu_sirq_buffer = sirq_buffers;
1970 trace_percpu_irq_buffer = irq_buffers;
1971 trace_percpu_nmi_buffer = nmi_buffers;
1972
1973 return 0;
1974
1975 err_nmi:
1976 free_percpu(irq_buffers);
1977 err_irq:
1978 free_percpu(sirq_buffers);
1979 err_sirq:
1980 free_percpu(buffers);
1981 err_warn:
1982 WARN(1, "Could not allocate percpu trace_printk buffer");
1983 return -ENOMEM;
1984}
1985
Steven Rostedt81698832012-10-11 10:15:05 -04001986static int buffers_allocated;
1987
Steven Rostedt07d777f2011-09-22 14:01:55 -04001988void trace_printk_init_buffers(void)
1989{
Steven Rostedt07d777f2011-09-22 14:01:55 -04001990 if (buffers_allocated)
1991 return;
1992
1993 if (alloc_percpu_trace_buffer())
1994 return;
1995
1996 pr_info("ftrace: Allocated trace_printk buffers\n");
1997
Steven Rostedtb382ede62012-10-10 21:44:34 -04001998 /* Expand the buffers to set size */
1999 tracing_update_buffers();
2000
Steven Rostedt07d777f2011-09-22 14:01:55 -04002001 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04002002
2003 /*
2004 * trace_printk_init_buffers() can be called by modules.
2005 * If that happens, then we need to start cmdline recording
2006 * directly here. If the global_trace.trace_buffer.buffer is already
2007 * allocated here, then this was called by module code.
2008 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002009 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04002010 tracing_start_cmdline_record();
2011}
2012
2013void trace_printk_start_comm(void)
2014{
2015 /* Start tracing comms if trace printk is set */
2016 if (!buffers_allocated)
2017 return;
2018 tracing_start_cmdline_record();
2019}
2020
2021static void trace_printk_start_stop_comm(int enabled)
2022{
2023 if (!buffers_allocated)
2024 return;
2025
2026 if (enabled)
2027 tracing_start_cmdline_record();
2028 else
2029 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002030}
2031
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002032/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002033 * trace_vbprintk - write a binary message to the tracing buffer
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002034 *
2035 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04002036int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002037{
Tom Zanussie1112b42009-03-31 00:48:49 -05002038 struct ftrace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002039 struct ring_buffer_event *event;
Steven Rostedte77405a2009-09-02 14:17:06 -04002040 struct ring_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002041 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002042 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002043 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002044 char *tbuffer;
2045 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002046
2047 if (unlikely(tracing_selftest_running || tracing_disabled))
2048 return 0;
2049
2050 /* Don't pollute graph traces with trace_vprintk internals */
2051 pause_graph_tracing();
2052
2053 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04002054 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002055
Steven Rostedt07d777f2011-09-22 14:01:55 -04002056 tbuffer = get_trace_buf();
2057 if (!tbuffer) {
2058 len = 0;
2059 goto out;
2060 }
2061
2062 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2063
2064 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002065 goto out;
2066
Steven Rostedt07d777f2011-09-22 14:01:55 -04002067 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002068 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002069 buffer = tr->trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04002070 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2071 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002072 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002073 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002074 entry = ring_buffer_event_data(event);
2075 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002076 entry->fmt = fmt;
2077
Steven Rostedt07d777f2011-09-22 14:01:55 -04002078 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05002079 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002080 __buffer_unlock_commit(buffer, event);
Steven Rostedtd9313692010-01-06 17:27:11 -05002081 ftrace_trace_stack(buffer, flags, 6, pc);
2082 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002083
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002084out:
Steven Rostedt5168ae52010-06-03 09:36:50 -04002085 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002086 unpause_graph_tracing();
2087
2088 return len;
2089}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002090EXPORT_SYMBOL_GPL(trace_vbprintk);
2091
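/*
 * Wrapper sketch (hypothetical, modeled on the trace_printk() path):
 * varargs are packed into a va_list and handed to trace_vbprintk().
 * Note that only the fmt pointer is stored in the event, so fmt must
 * be a persistent (typically constant) string.
 */
static inline int example_bprintk(unsigned long ip, const char *fmt, ...)
{
	va_list ap;
	int len;

	va_start(ap, fmt);
	len = trace_vbprintk(ip, fmt, ap);
	va_end(ap);
	return len;
}
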
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002092static int
2093__trace_array_vprintk(struct ring_buffer *buffer,
2094 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002095{
Tom Zanussie1112b42009-03-31 00:48:49 -05002096 struct ftrace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002097 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002098 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002099 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002100 unsigned long flags;
2101 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002102
2103 if (tracing_disabled || tracing_selftest_running)
2104 return 0;
2105
Steven Rostedt07d777f2011-09-22 14:01:55 -04002106 /* Don't pollute graph traces with trace_vprintk internals */
2107 pause_graph_tracing();
2108
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002109 pc = preempt_count();
2110 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002111
Steven Rostedt07d777f2011-09-22 14:01:55 -04002112
2113 tbuffer = get_trace_buf();
2114 if (!tbuffer) {
2115 len = 0;
2116 goto out;
2117 }
2118
2119 len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2120 if (len > TRACE_BUF_SIZE)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002121 goto out;
2122
Steven Rostedt07d777f2011-09-22 14:01:55 -04002123 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002124 size = sizeof(*entry) + len + 1;
Steven Rostedte77405a2009-09-02 14:17:06 -04002125 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
Steven Rostedt07d777f2011-09-22 14:01:55 -04002126 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002127 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002128 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002129 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002130 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002131
Steven Rostedt07d777f2011-09-22 14:01:55 -04002132 memcpy(&entry->buf, tbuffer, len);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002133 entry->buf[len] = '\0';
Tom Zanussif306cc82013-10-24 08:34:17 -05002134 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002135 __buffer_unlock_commit(buffer, event);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002136 ftrace_trace_stack(buffer, flags, 6, pc);
Steven Rostedtd9313692010-01-06 17:27:11 -05002137 }
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002138 out:
2139 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002140 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002141
2142 return len;
2143}
Steven Rostedt659372d2009-09-03 19:11:07 -04002144
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002145int trace_array_vprintk(struct trace_array *tr,
2146 unsigned long ip, const char *fmt, va_list args)
2147{
2148 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2149}
2150
2151int trace_array_printk(struct trace_array *tr,
2152 unsigned long ip, const char *fmt, ...)
2153{
2154 int ret;
2155 va_list ap;
2156
2157 if (!(trace_flags & TRACE_ITER_PRINTK))
2158 return 0;
2159
2160 va_start(ap, fmt);
2161 ret = trace_array_vprintk(tr, ip, fmt, ap);
2162 va_end(ap);
2163 return ret;
2164}
2165
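/*
 * Usage sketch (illustration only, helper name hypothetical): _THIS_IP_
 * records the call site as the event's instruction pointer.
 */
static inline void example_instance_printk(struct trace_array *tr, int val)
{
	trace_array_printk(tr, _THIS_IP_, "val=%d\n", val);
}
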
2166int trace_array_printk_buf(struct ring_buffer *buffer,
2167 unsigned long ip, const char *fmt, ...)
2168{
2169 int ret;
2170 va_list ap;
2171
2172 if (!(trace_flags & TRACE_ITER_PRINTK))
2173 return 0;
2174
2175 va_start(ap, fmt);
2176 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2177 va_end(ap);
2178 return ret;
2179}
2180
Steven Rostedt659372d2009-09-03 19:11:07 -04002181int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2182{
Steven Rostedta813a152009-10-09 01:41:35 -04002183 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04002184}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002185EXPORT_SYMBOL_GPL(trace_vprintk);
2186
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002187static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04002188{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002189 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2190
Steven Rostedt5a90f572008-09-03 17:42:51 -04002191 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002192 if (buf_iter)
2193 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04002194}
2195
Ingo Molnare309b412008-05-12 21:20:51 +02002196static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002197peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2198 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002199{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002200 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002201 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002202
Steven Rostedtd7690412008-10-01 00:29:53 -04002203 if (buf_iter)
2204 event = ring_buffer_iter_peek(buf_iter, ts);
2205 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002206 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002207 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04002208
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002209 if (event) {
2210 iter->ent_size = ring_buffer_event_length(event);
2211 return ring_buffer_event_data(event);
2212 }
2213 iter->ent_size = 0;
2214 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002215}
Steven Rostedtd7690412008-10-01 00:29:53 -04002216
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002217static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002218__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2219 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002220{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002221 struct ring_buffer *buffer = iter->trace_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002222 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08002223 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002224 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002225 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002226 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002227 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002228 int cpu;
2229
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002230 /*
2231 * If we are in a per_cpu trace file, don't bother by iterating over
2232 * all cpu and peek directly.
2233 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002234 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002235 if (ring_buffer_empty_cpu(buffer, cpu_file))
2236 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002237 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002238 if (ent_cpu)
2239 *ent_cpu = cpu_file;
2240
2241 return ent;
2242 }
2243
Steven Rostedtab464282008-05-12 21:21:00 +02002244 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002245
2246 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002247 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002248
Steven Rostedtbc21b472010-03-31 19:49:26 -04002249 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002250
Ingo Molnarcdd31cd2008-05-12 21:20:46 +02002251 /*
2252 * Pick the entry with the smallest timestamp:
2253 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002254 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002255 next = ent;
2256 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002257 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002258 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002259 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002260 }
2261 }
2262
Steven Rostedt12b5da32012-03-27 10:43:28 -04002263 iter->ent_size = next_size;
2264
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002265 if (ent_cpu)
2266 *ent_cpu = next_cpu;
2267
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002268 if (ent_ts)
2269 *ent_ts = next_ts;
2270
Steven Rostedtbc21b472010-03-31 19:49:26 -04002271 if (missing_events)
2272 *missing_events = next_lost;
2273
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002274 return next;
2275}
2276
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002277/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002278struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2279 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002280{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002281 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002282}
Ingo Molnar8c523a92008-05-12 21:20:46 +02002283
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002284/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05002285void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002286{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002287 iter->ent = __find_next_entry(iter, &iter->cpu,
2288 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02002289
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002290 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002291 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002292
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002293 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02002294}
2295
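/*
 * Consumer-loop sketch (illustration only, helper name hypothetical):
 * each call advances to the oldest unread entry across all cpus and
 * returns NULL once the buffers are drained.
 */
static inline void example_walk_entries(struct trace_iterator *iter)
{
	while (trace_find_next_entry_inc(iter)) {
		/* iter->ent, iter->cpu and iter->ts describe this entry */
	}
}
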
Ingo Molnare309b412008-05-12 21:20:51 +02002296static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002297{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002298 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002299 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002300}
2301
Ingo Molnare309b412008-05-12 21:20:51 +02002302static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002303{
2304 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002305 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002306 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002307
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002308 WARN_ON_ONCE(iter->leftover);
2309
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002310 (*pos)++;
2311
2312 /* can't go backwards */
2313 if (iter->idx > i)
2314 return NULL;
2315
2316 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05002317 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002318 else
2319 ent = iter;
2320
2321 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05002322 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002323
2324 iter->pos = *pos;
2325
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002326 return ent;
2327}
2328
Jason Wessel955b61e2010-08-05 09:22:23 -05002329void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002330{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002331 struct ring_buffer_event *event;
2332 struct ring_buffer_iter *buf_iter;
2333 unsigned long entries = 0;
2334 u64 ts;
2335
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002336 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002337
Steven Rostedt6d158a82012-06-27 20:46:14 -04002338 buf_iter = trace_buffer_iter(iter, cpu);
2339 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002340 return;
2341
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002342 ring_buffer_iter_reset(buf_iter);
2343
2344 /*
2345 * With the max latency tracers, a reset may never have taken
2346 * place on a cpu. This is evident from the timestamp being
2347 * before the start of the buffer.
2348 */
2349 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002350 if (ts >= iter->trace_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002351 break;
2352 entries++;
2353 ring_buffer_read(buf_iter, NULL);
2354 }
2355
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002356 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002357}
2358
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002359/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002360 * The current tracer is copied to avoid taking a global lock
 2361 * all around.
2362 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002363static void *s_start(struct seq_file *m, loff_t *pos)
2364{
2365 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002366 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002367 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002368 void *p = NULL;
2369 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002370 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002371
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09002372 /*
2373 * copy the tracer to avoid using a global lock all around.
2374 * iter->trace is a copy of current_trace, the pointer to the
2375 * name may be used instead of a strcmp(), as iter->trace->name
2376 * will point to the same string as current_trace->name.
2377 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002378 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002379 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2380 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002381 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002382
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002383#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002384 if (iter->snapshot && iter->trace->use_max_tr)
2385 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002386#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002387
2388 if (!iter->snapshot)
2389 atomic_inc(&trace_record_cmdline_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002390
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002391 if (*pos != iter->pos) {
2392 iter->ent = NULL;
2393 iter->cpu = 0;
2394 iter->idx = -1;
2395
Steven Rostedtae3b5092013-01-23 15:22:59 -05002396 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002397 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002398 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002399 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002400 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002401
Lai Jiangshanac91d852010-03-02 17:54:50 +08002402 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002403 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2404 ;
2405
2406 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002407 /*
2408 * If we overflowed the seq_file before, then we want
2409 * to just reuse the trace_seq buffer again.
2410 */
2411 if (iter->leftover)
2412 p = iter;
2413 else {
2414 l = *pos - 1;
2415 p = s_next(m, p, &l);
2416 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002417 }
2418
Lai Jiangshan4f535962009-05-18 19:35:34 +08002419 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002420 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002421 return p;
2422}
2423
2424static void s_stop(struct seq_file *m, void *p)
2425{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002426 struct trace_iterator *iter = m->private;
2427
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002428#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002429 if (iter->snapshot && iter->trace->use_max_tr)
2430 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002431#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002432
2433 if (!iter->snapshot)
2434 atomic_dec(&trace_record_cmdline_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002435
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002436 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08002437 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002438}
2439
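/*
 * Wiring sketch (assumption: the file's s_show() callback, defined
 * further down, completes the set): these handlers plug into a
 * seq_operations table for the trace file.
 */
#if 0	/* illustrative only; the real table lives later in this file */
static const struct seq_operations example_trace_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};
#endif
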
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002440static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002441get_total_entries(struct trace_buffer *buf,
2442 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002443{
2444 unsigned long count;
2445 int cpu;
2446
2447 *total = 0;
2448 *entries = 0;
2449
2450 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002451 count = ring_buffer_entries_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002452 /*
2453 * If this buffer has skipped entries, then we hold all
2454 * entries for the trace and we need to ignore the
2455 * ones before the time stamp.
2456 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002457 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2458 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002459 /* total is the same as the entries */
2460 *total += count;
2461 } else
2462 *total += count +
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002463 ring_buffer_overrun_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002464 *entries += count;
2465 }
2466}
2467
Ingo Molnare309b412008-05-12 21:20:51 +02002468static void print_lat_help_header(struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002469{
Michael Ellermana6168352008-08-20 16:36:11 -07002470 seq_puts(m, "# _------=> CPU# \n");
2471 seq_puts(m, "# / _-----=> irqs-off \n");
2472 seq_puts(m, "# | / _----=> need-resched \n");
2473 seq_puts(m, "# || / _---=> hardirq/softirq \n");
2474 seq_puts(m, "# ||| / _--=> preempt-depth \n");
Steven Rostedte6e1e252011-03-09 10:41:56 -05002475 seq_puts(m, "# |||| / delay \n");
2476 seq_puts(m, "# cmd pid ||||| time | caller \n");
2477 seq_puts(m, "# \\ / ||||| \\ | / \n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002478}
2479
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002480static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002481{
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002482 unsigned long total;
2483 unsigned long entries;
2484
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002485 get_total_entries(buf, &total, &entries);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002486 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2487 entries, total, num_online_cpus());
2488 seq_puts(m, "#\n");
2489}
2490
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002491static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002492{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002493 print_event_info(buf, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002494 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
Michael Ellermana6168352008-08-20 16:36:11 -07002495 seq_puts(m, "# | | | | |\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002496}
2497
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002498static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt77271ce2011-11-17 09:34:33 -05002499{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002500 print_event_info(buf, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002501 seq_puts(m, "# _-----=> irqs-off\n");
2502 seq_puts(m, "# / _----=> need-resched\n");
2503 seq_puts(m, "# | / _---=> hardirq/softirq\n");
2504 seq_puts(m, "# || / _--=> preempt-depth\n");
2505 seq_puts(m, "# ||| / delay\n");
2506 seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n");
2507 seq_puts(m, "# | | | |||| | |\n");
2508}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002509
void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_buffer *buf = iter->trace_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
	struct tracer *type = iter->trace;
	unsigned long entries;
	unsigned long total;
	const char *name = type->name;

	get_total_entries(buf, &total, &entries);

	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "# -----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   buf->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "#    -----------------\n");
	seq_printf(m, "#    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid,
		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "#    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, "#  => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#  => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#\n");
	}

	seq_puts(m, "#\n");
}

static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;

	if (!(trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	if (cpumask_test_cpu(iter->cpu, iter->started))
		return;

	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
		return;

	cpumask_set_cpu(iter->cpu, iter->started);

	/* Don't print started cpu buffer for the first entry of the trace */
	if (iter->idx > 1)
		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
				 iter->cpu);
}

static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	test_cpu_buff_start(iter);

	event = ftrace_find_event(entry->type);

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
			if (!trace_print_lat_context(iter))
				goto partial;
		} else {
			if (!trace_print_context(iter))
				goto partial;
		}
	}

	if (event)
		return event->funcs->trace(iter, sym_flags, event);

	if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (!trace_seq_printf(s, "%d %d %llu ",
				      entry->pid, iter->cpu, iter->ts))
			goto partial;
	}

	event = ftrace_find_event(entry->type);
	if (event)
		return event->funcs->raw(iter, 0, event);

	if (!trace_seq_printf(s, "%d ?\n", entry->type))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
		SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
		SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
	}

	event = ftrace_find_event(entry->type);
	if (event) {
		enum print_line_t ret = event->funcs->hex(iter, 0, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	SEQ_PUT_FIELD_RET(s, newline);

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_FIELD_RET(s, entry->pid);
		SEQ_PUT_FIELD_RET(s, iter->cpu);
		SEQ_PUT_FIELD_RET(s, iter->ts);
	}

	event = ftrace_find_event(entry->type);
	return event ? event->funcs->binary(iter, 0, event) :
		TRACE_TYPE_HANDLED;
}

int trace_empty(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter;
	int cpu;

	/* If we are looking at one CPU buffer, only check that one */
	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
		cpu = iter->cpu_file;
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
		return 1;
	}

	for_each_tracing_cpu(cpu) {
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
	}

	return 1;
}

/* Called with trace_event_read_lock() held. */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	enum print_line_t ret;

	if (iter->lost_events &&
	    !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
			      iter->cpu, iter->lost_events))
		return TRACE_TYPE_PARTIAL_LINE;

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_BPUTS &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bputs_msg_only(iter);

	if (iter->ent->type == TRACE_BPRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}

void trace_latency_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;

	/* print nothing if the buffers are empty */
	if (trace_empty(iter))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
		print_trace_header(m, iter);

	if (!(trace_flags & TRACE_ITER_VERBOSE))
		print_lat_help_header(m);
}

void trace_default_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;
		print_trace_header(m, iter);
		if (!(trace_flags & TRACE_ITER_VERBOSE))
			print_lat_help_header(m);
	} else {
		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
			if (trace_flags & TRACE_ITER_IRQ_INFO)
				print_func_help_header_irq(iter->trace_buffer, m);
			else
				print_func_help_header(iter->trace_buffer, m);
		}
	}
}

static void test_ftrace_alive(struct seq_file *m)
{
	if (!ftrace_is_dead())
		return;
	seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
	seq_printf(m, "#          MAY BE MISSING FUNCTION EVENTS\n");
}

#ifdef CONFIG_TRACER_MAX_TRACE
static void show_snapshot_main_help(struct seq_file *m)
{
	seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
	seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
	seq_printf(m, "#                      Takes a snapshot of the main buffer.\n");
	seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n");
	seq_printf(m, "#                      (Doesn't have to be '2'; works with any number that\n");
	seq_printf(m, "#                       is not a '0' or '1')\n");
}

static void show_snapshot_percpu_help(struct seq_file *m)
{
	seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
	seq_printf(m, "#                      Takes a snapshot of the main buffer for this cpu.\n");
#else
	seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
	seq_printf(m, "#                     Must use main snapshot file to allocate.\n");
#endif
	seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
	seq_printf(m, "#                      (Doesn't have to be '2'; works with any number that\n");
	seq_printf(m, "#                       is not a '0' or '1')\n");
}

static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
{
	if (iter->tr->allocated_snapshot)
		seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
	else
		seq_printf(m, "#\n# * Snapshot is freed *\n#\n");

	seq_printf(m, "# Snapshot commands:\n");
	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
		show_snapshot_main_help(m);
	else
		show_snapshot_percpu_help(m);
}
#else
/* Should never be called */
static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
#endif

static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
			test_ftrace_alive(m);
		}
		if (iter->snapshot && trace_empty(iter))
			print_snapshot_help(m, iter);
		else if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else
			trace_default_header(m);

	} else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;

	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * Use that instead.
		 * ret is 0 if seq_file write succeeded.
		 * -1 otherwise.
		 */
		iter->leftover = ret;
	}

	return 0;
}

/*
 * Should be used after trace_array_get(), trace_types_lock
 * ensures that i_cdev was already initialized.
 */
static inline int tracing_get_cpu(struct inode *inode)
{
	if (inode->i_cdev) /* See trace_create_cpu_file() */
		return (long)inode->i_cdev - 1;
	return RING_BUFFER_ALL_CPUS;
}

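/*
 * Illustrative sketch (hypothetical, not part of this file): the encode
 * side that the "See trace_create_cpu_file()" comment refers to. The cpu
 * number is assumed to be stashed in i_cdev offset by one, so that cpu 0
 * remains distinguishable from a NULL (never-initialized) pointer:
 *
 *	d_inode(dentry)->i_cdev = (void *)(long)(cpu + 1);
 *
 * tracing_get_cpu() undoes the offset; a NULL i_cdev means "all CPUs".
 */
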
static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};

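/*
 * A minimal sketch of how the seq_file core drives the ops above during
 * a read(2) of the "trace" file. This is simplified pseudo-code, not the
 * real seq_file implementation; the buffer-full check is a stand-in:
 *
 *	void *v = tracer_seq_ops.start(m, &pos);
 *	while (v && !buffer_full(m)) {
 *		tracer_seq_ops.show(m, v);
 *		v = tracer_seq_ops.next(m, v, &pos);
 *	}
 *	tracer_seq_ops.stop(m, v);
 *
 * This is why s_show() must handle both real entries and the header
 * position (iter->ent == NULL), and why it parks overflowed output in
 * iter->leftover for the next cycle.
 */
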
static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int cpu;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
	if (!iter)
		return ERR_PTR(-ENOMEM);

	iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
				    GFP_KERNEL);
	if (!iter->buffer_iter)
		goto release;

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	*iter->trace = *tr->current_trace;

	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
		goto fail;

	iter->tr = tr;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Currently only the top directory has a snapshot */
	if (tr->current_trace->print_max || snapshot)
		iter->trace_buffer = &tr->max_buffer;
	else
#endif
		iter->trace_buffer = &tr->trace_buffer;
	iter->snapshot = snapshot;
	iter->pos = -1;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* stop the trace while dumping if we are not opening "snapshot" */
	if (!iter->snapshot)
		tracing_stop_tr(tr);

	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter->buffer_iter[cpu] =
				ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		}
		ring_buffer_read_prepare_sync();
		for_each_tracing_cpu(cpu) {
			ring_buffer_read_start(iter->buffer_iter[cpu]);
			tracing_iter_reset(iter, cpu);
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		ring_buffer_read_prepare_sync();
		ring_buffer_read_start(iter->buffer_iter[cpu]);
		tracing_iter_reset(iter, cpu);
	}

	mutex_unlock(&trace_types_lock);

	return iter;

 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
release:
	seq_release_private(inode, file);
	return ERR_PTR(-ENOMEM);
}

int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}

bool tracing_is_disabled(void)
{
	return (tracing_disabled) ? true : false;
}

/*
 * Open and update trace_array ref count.
 * Must have the current trace_array passed to it.
 */
static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	filp->private_data = inode->i_private;

	return 0;
}

static int tracing_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m = file->private_data;
	struct trace_iterator *iter;
	int cpu;

	if (!(file->f_mode & FMODE_READ)) {
		trace_array_put(tr);
		return 0;
	}

	/* Writes do not use seq_file */
	iter = m->private;
	mutex_lock(&trace_types_lock);

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	if (!iter->snapshot)
		/* reenable tracing if it was previously enabled */
		tracing_start_tr(tr);

	__trace_array_put(tr);

	mutex_unlock(&trace_types_lock);

	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
	seq_release_private(inode, file);

	return 0;
}

static int tracing_release_generic_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);
	return 0;
}

static int tracing_single_release_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return single_release(inode, file);
}

static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* If this file was open for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		int cpu = tracing_get_cpu(inode);

		if (cpu == RING_BUFFER_ALL_CPUS)
			tracing_reset_online_cpus(&tr->trace_buffer);
		else
			tracing_reset(&tr->trace_buffer, cpu);
	}

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, false);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}

	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

/*
 * Some tracers are not suitable for instance buffers.
 * A tracer is always available for the global array (toplevel)
 * or if it explicitly states that it is.
 */
static bool
trace_ok_for_array(struct tracer *t, struct trace_array *tr)
{
	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
}

/* Find the next tracer that this trace array may use */
static struct tracer *
get_tracer_for_array(struct trace_array *tr, struct tracer *t)
{
	while (t && !trace_ok_for_array(t, tr))
		t = t->next;

	return t;
}

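/*
 * Sketch (hypothetical tracer, not defined in this file): a tracer opts
 * in to instance buffers by setting ->allow_instances; otherwise it is
 * only offered on the top-level (global) trace array:
 *
 *	static struct tracer example_tracer = {
 *		.name			= "example",
 *		.allow_instances	= true,
 *	};
 */
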
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t = v;

	(*pos)++;

	if (t)
		t = get_tracer_for_array(tr, t->next);

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);

	t = get_tracer_for_array(tr, trace_types);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_printf(m, "%s", t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}

static const struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int show_traces_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	ret = seq_open(file, &show_traces_seq_ops);
	if (ret)
		return ret;

	m = file->private_data;
	m->private = tr;

	return 0;
}

static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}

loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
{
	int ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, whence);
	else
		file->f_pos = ret = 0;

	return ret;
}

static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.write		= tracing_write_stub,
	.llseek		= tracing_lseek,
	.release	= tracing_release,
};

static const struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
	.llseek		= seq_lseek,
};

/*
 * The tracer itself will not take this lock, but still we want
 * to provide a consistent cpumask to user-space:
 */
static DEFINE_MUTEX(tracing_cpumask_update_lock);

/*
 * Temporary storage for the character representation of the
 * CPU bitmask (and one more byte for the newline):
 */
static char mask_str[NR_CPUS + 1];

static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	int len;

	mutex_lock(&tracing_cpumask_update_lock);

	len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
	if (count - len < 2) {
		count = -EINVAL;
		goto out_err;
	}
	len += sprintf(mask_str + len, "\n");
	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);

out_err:
	mutex_unlock(&tracing_cpumask_update_lock);

	return count;
}

static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	cpumask_var_t tracing_cpumask_new;
	int err, cpu;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	mutex_lock(&tracing_cpumask_update_lock);

	local_irq_disable();
	arch_spin_lock(&ftrace_max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
		    !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
		    cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
		}
	}
	arch_spin_unlock(&ftrace_max_lock);
	local_irq_enable();

	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);

	mutex_unlock(&tracing_cpumask_update_lock);
	free_cpumask_var(tracing_cpumask_new);

	return count;

err_unlock:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}

static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
	.release	= tracing_release_generic_tr,
	.llseek		= generic_file_llseek,
};

static int tracing_trace_options_show(struct seq_file *m, void *v)
{
	struct tracer_opt *trace_opts;
	struct trace_array *tr = m->private;
	u32 tracer_flags;
	int i;

	mutex_lock(&trace_types_lock);
	tracer_flags = tr->current_trace->flags->val;
	trace_opts = tr->current_trace->flags->opts;

	for (i = 0; trace_options[i]; i++) {
		if (trace_flags & (1 << i))
			seq_printf(m, "%s\n", trace_options[i]);
		else
			seq_printf(m, "no%s\n", trace_options[i]);
	}

	for (i = 0; trace_opts[i].name; i++) {
		if (tracer_flags & trace_opts[i].bit)
			seq_printf(m, "%s\n", trace_opts[i].name);
		else
			seq_printf(m, "no%s\n", trace_opts[i].name);
	}
	mutex_unlock(&trace_types_lock);

	return 0;
}

static int __set_tracer_option(struct trace_array *tr,
			       struct tracer_flags *tracer_flags,
			       struct tracer_opt *opts, int neg)
{
	struct tracer *trace = tr->current_trace;
	int ret;

	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
	if (ret)
		return ret;

	if (neg)
		tracer_flags->val &= ~opts->bit;
	else
		tracer_flags->val |= opts->bit;
	return 0;
}

/* Try to assign a tracer specific option */
static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
{
	struct tracer *trace = tr->current_trace;
	struct tracer_flags *tracer_flags = trace->flags;
	struct tracer_opt *opts = NULL;
	int i;

	for (i = 0; tracer_flags->opts[i].name; i++) {
		opts = &tracer_flags->opts[i];

		if (strcmp(cmp, opts->name) == 0)
			return __set_tracer_option(tr, trace->flags, opts, neg);
	}

	return -EINVAL;
}

/* Some tracers require overwrite to stay enabled */
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
{
	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
		return -1;

	return 0;
}

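/*
 * Sketch of the intended wiring (hypothetical callback; the irqsoff and
 * wakeup latency tracers use this helper along these lines, since they
 * rely on ring buffer overwrite staying enabled):
 *
 *	static int example_flag_changed(struct trace_array *tr,
 *					u32 mask, int set)
 *	{
 *		return trace_keep_overwrite(tr->current_trace, mask, set);
 *	}
 */
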
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
	/* do nothing if flag is already set */
	if (!!(trace_flags & mask) == !!enabled)
		return 0;

	/* Give the tracer a chance to approve the change */
	if (tr->current_trace->flag_changed)
		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
			return -EINVAL;

	if (enabled)
		trace_flags |= mask;
	else
		trace_flags &= ~mask;

	if (mask == TRACE_ITER_RECORD_CMD)
		trace_event_enable_cmd_record(enabled);

	if (mask == TRACE_ITER_OVERWRITE) {
		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
#ifdef CONFIG_TRACER_MAX_TRACE
		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
#endif
	}

	if (mask == TRACE_ITER_PRINTK)
		trace_printk_start_stop_comm(enabled);

	return 0;
}

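/*
 * Example (hypothetical call site): flipping a flag from kernel code
 * follows the same path as a write to the trace_options file, e.g.
 *
 *	ret = set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 0);
 *
 * which gives the current tracer a veto via ->flag_changed before
 * ring_buffer_change_overwrite() is invoked on the buffer(s).
 */
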
static int trace_set_options(struct trace_array *tr, char *option)
{
	char *cmp;
	int neg = 0;
	int ret = -ENODEV;
	int i;

	cmp = strstrip(option);

	if (strncmp(cmp, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	mutex_lock(&trace_types_lock);

	for (i = 0; trace_options[i]; i++) {
		if (strcmp(cmp, trace_options[i]) == 0) {
			ret = set_tracer_flag(tr, 1 << i, !neg);
			break;
		}
	}

	/* If no option could be set, test the specific tracer options */
	if (!trace_options[i])
		ret = set_tracer_option(tr, cmp, neg);

	mutex_unlock(&trace_types_lock);

	return ret;
}

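/*
 * Examples of the option grammar parsed above (a leading "no" clears a
 * flag, anything else sets it); these strings mirror what user-space
 * writes to the trace_options file:
 *
 *	trace_set_options(tr, "stacktrace");	// set TRACE_ITER_STACKTRACE
 *	trace_set_options(tr, "nostacktrace");	// clear it
 *
 * Anything not found in trace_options[] falls through to
 * set_tracer_option(), e.g. "funcgraph-duration" when function_graph is
 * assumed to be the current tracer.
 */
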
static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
			    size_t cnt, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = trace_set_options(tr, buf);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int tracing_trace_options_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	ret = single_open(file, tracing_trace_options_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static const struct file_operations tracing_iter_fops = {
	.open		= tracing_trace_options_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_trace_options_write,
};

Ingo Molnar7bd2f242008-05-12 21:20:45 +02003565static const char readme_msg[] =
3566 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003567 "# echo 0 > tracing_on : quick way to disable tracing\n"
3568 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3569 " Important files:\n"
3570 " trace\t\t\t- The static contents of the buffer\n"
3571 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3572 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3573 " current_tracer\t- function and latency tracers\n"
3574 " available_tracers\t- list of configured tracers for current_tracer\n"
3575 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3576 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3577 " trace_clock\t\t-change the clock used to order events\n"
3578 " local: Per cpu clock but may not be synced across CPUs\n"
3579 " global: Synced across CPUs but slows tracing down.\n"
3580 " counter: Not a clock, but just an increment\n"
3581 " uptime: Jiffy counter from time of boot\n"
3582 " perf: Same clock that perf events use\n"
3583#ifdef CONFIG_X86_64
3584 " x86-tsc: TSC cycle counter\n"
3585#endif
3586 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3587 " tracing_cpumask\t- Limit which CPUs to trace\n"
3588 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3589 "\t\t\t Remove sub-buffer with rmdir\n"
3590 " trace_options\t\t- Set format or modify how tracing happens\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003591 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
3592 "\t\t\t option name\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003593#ifdef CONFIG_DYNAMIC_FTRACE
3594 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003595 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3596 "\t\t\t functions\n"
3597 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3598 "\t modules: Can select a group via module\n"
3599 "\t Format: :mod:<module-name>\n"
3600 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3601 "\t triggers: a command to perform when function is hit\n"
3602 "\t Format: <function>:<trigger>[:count]\n"
3603 "\t trigger: traceon, traceoff\n"
3604 "\t\t enable_event:<system>:<event>\n"
3605 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003606#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003607 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003608#endif
3609#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003610 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003611#endif
Steven Rostedt (Red Hat)17a280e2014-04-10 22:43:37 -04003612 "\t\t dump\n"
3613 "\t\t cpudump\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003614 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3615 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3616 "\t The first one will disable tracing every time do_fault is hit\n"
3617 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3618 "\t The first time do trap is hit and it disables tracing, the\n"
3619 "\t counter will decrement to 2. If tracing is already disabled,\n"
3620 "\t the counter will not decrement. It only decrements when the\n"
3621 "\t trigger did work\n"
3622 "\t To remove trigger without count:\n"
3623 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3624 "\t To remove trigger with a count:\n"
3625 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003626 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003627 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3628 "\t modules: Can select a group via module command :mod:\n"
3629 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003630#endif /* CONFIG_DYNAMIC_FTRACE */
3631#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003632 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3633 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003634#endif
3635#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3636 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3637 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3638#endif
3639#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003640 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3641 "\t\t\t snapshot buffer. Read the contents for more\n"
3642 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003643#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003644#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003645 " stack_trace\t\t- Shows the max stack trace when active\n"
3646 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003647 "\t\t\t Write into this file to reset the max size (trigger a\n"
3648 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003649#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003650 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3651 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003652#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003653#endif /* CONFIG_STACK_TRACER */
Tom Zanussi26f25562014-01-17 15:11:44 -06003654 " events/\t\t- Directory containing all trace event subsystems:\n"
3655 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3656 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003657 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3658 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003659 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003660 " events/<system>/<event>/\t- Directory containing control files for\n"
3661 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003662 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3663 " filter\t\t- If set, only events passing filter are traced\n"
3664 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003665 "\t Format: <trigger>[:count][if <filter>]\n"
3666 "\t trigger: traceon, traceoff\n"
3667 "\t enable_event:<system>:<event>\n"
3668 "\t disable_event:<system>:<event>\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003669#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003670 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003671#endif
3672#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003673 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003674#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003675 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3676 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3677 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3678 "\t events/block/block_unplug/trigger\n"
3679 "\t The first disables tracing every time block_unplug is hit.\n"
3680 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3681 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3682 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3683 "\t Like function triggers, the counter is only decremented if it\n"
3684 "\t enabled or disabled tracing.\n"
3685 "\t To remove a trigger without a count:\n"
3686 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3687 "\t To remove a trigger with a count:\n"
3688 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3689 "\t Filters can be ignored when removing a trigger.\n"
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003690;
3691
3692static ssize_t
3693tracing_readme_read(struct file *filp, char __user *ubuf,
3694 size_t cnt, loff_t *ppos)
3695{
3696 return simple_read_from_buffer(ubuf, cnt, ppos,
3697 readme_msg, strlen(readme_msg));
3698}
3699
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003700static const struct file_operations tracing_readme_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003701 .open = tracing_open_generic,
3702 .read = tracing_readme_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003703 .llseek = generic_file_llseek,
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003704};
3705
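/*
 * Usage sketch (editor's addition, not in the original source): the
 * readme_msg[] help text above is exposed through the read-only
 * "README" file, so the whole cheat sheet can be dumped with (path
 * assumes debugfs mounted at /sys/kernel/debug):
 *
 *	cat /sys/kernel/debug/tracing/README
 */
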
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003706static ssize_t
Avadh Patel69abe6a2009-04-10 16:04:48 -04003707tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
3708 size_t cnt, loff_t *ppos)
3709{
3710 char *buf_comm;
3711 char *file_buf;
3712 char *buf;
3713 int len = 0;
3714 int pid;
3715 int i;
3716
3717 file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
3718 if (!file_buf)
3719 return -ENOMEM;
3720
3721 buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
3722 if (!buf_comm) {
3723 kfree(file_buf);
3724 return -ENOMEM;
3725 }
3726
3727 buf = file_buf;
3728
3729 for (i = 0; i < SAVED_CMDLINES; i++) {
3730 int r;
3731
3732 pid = map_cmdline_to_pid[i];
3733 if (pid == -1 || pid == NO_CMDLINE_MAP)
3734 continue;
3735
3736 trace_find_cmdline(pid, buf_comm);
3737 r = sprintf(buf, "%d %s\n", pid, buf_comm);
3738 buf += r;
3739 len += r;
3740 }
3741
3742 len = simple_read_from_buffer(ubuf, cnt, ppos,
3743 file_buf, len);
3744
3745 kfree(file_buf);
3746 kfree(buf_comm);
3747
3748 return len;
3749}
3750
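/*
 * Usage sketch (editor's addition): per the sprintf() format above,
 * reading "saved_cmdlines" yields one "<pid> <comm>" pair per cached
 * mapping; the output below is purely illustrative:
 *
 *	# cat /sys/kernel/debug/tracing/saved_cmdlines
 *	1 systemd
 *	742 sshd
 */
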
3751static const struct file_operations tracing_saved_cmdlines_fops = {
3752 .open = tracing_open_generic,
3753 .read = tracing_saved_cmdlines_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003754 .llseek = generic_file_llseek,
Avadh Patel69abe6a2009-04-10 16:04:48 -04003755};
3756
3757static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003758tracing_set_trace_read(struct file *filp, char __user *ubuf,
3759 size_t cnt, loff_t *ppos)
3760{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003761 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08003762 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003763 int r;
3764
3765 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003766 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003767 mutex_unlock(&trace_types_lock);
3768
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003769 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003770}
3771
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02003772int tracer_init(struct tracer *t, struct trace_array *tr)
3773{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003774 tracing_reset_online_cpus(&tr->trace_buffer);
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02003775 return t->init(tr);
3776}
3777
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003778static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003779{
3780 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05003781
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003782 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003783 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003784}
3785
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003786#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookad60da502012-10-17 11:56:16 +09003787/* resize @trace_buf's entries to the size of @size_buf's entries */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003788static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
3789 struct trace_buffer *size_buf, int cpu_id)
Hiraku Toyookad60da502012-10-17 11:56:16 +09003790{
3791 int cpu, ret = 0;
3792
3793 if (cpu_id == RING_BUFFER_ALL_CPUS) {
3794 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003795 ret = ring_buffer_resize(trace_buf->buffer,
3796 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
Hiraku Toyookad60da502012-10-17 11:56:16 +09003797 if (ret < 0)
3798 break;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003799 per_cpu_ptr(trace_buf->data, cpu)->entries =
3800 per_cpu_ptr(size_buf->data, cpu)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09003801 }
3802 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003803 ret = ring_buffer_resize(trace_buf->buffer,
3804 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
Hiraku Toyookad60da502012-10-17 11:56:16 +09003805 if (ret == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003806 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
3807 per_cpu_ptr(size_buf->data, cpu_id)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09003808 }
3809
3810 return ret;
3811}
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003812#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09003813
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003814static int __tracing_resize_ring_buffer(struct trace_array *tr,
3815 unsigned long size, int cpu)
Steven Rostedt73c51622009-03-11 13:42:01 -04003816{
3817 int ret;
3818
3819 /*
3820 * If kernel or user changes the size of the ring buffer
Steven Rostedta123c522009-03-12 11:21:08 -04003821 * we use the size that was given, and we can forget about
3822 * expanding it later.
Steven Rostedt73c51622009-03-11 13:42:01 -04003823 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05003824 ring_buffer_expanded = true;
Steven Rostedt73c51622009-03-11 13:42:01 -04003825
Steven Rostedtb382ede62012-10-10 21:44:34 -04003826 /* May be called before buffers are initialized */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003827 if (!tr->trace_buffer.buffer)
Steven Rostedtb382ede62012-10-10 21:44:34 -04003828 return 0;
3829
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003830 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003831 if (ret < 0)
3832 return ret;
3833
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003834#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003835 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
3836 !tr->current_trace->use_max_tr)
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003837 goto out;
3838
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003839 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003840 if (ret < 0) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003841 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
3842 &tr->trace_buffer, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003843 if (r < 0) {
Steven Rostedta123c522009-03-12 11:21:08 -04003844 /*
3845 * AARGH! We are left with a differently
3846 * sized max buffer!
3847 * The max buffer is our "snapshot" buffer.
3848 * When a tracer needs a snapshot (one of the
3849 * latency tracers), it swaps the max buffer
3850 * with the saved snapshot. We succeeded in
3851 * updating the size of the main buffer, but failed to
3852 * update the size of the max buffer. But when we tried
3853 * to reset the main buffer to the original size, we
3854 * failed there too. This is very unlikely to
3855 * happen, but if it does, warn and kill all
3856 * tracing.
3857 */
Steven Rostedt73c51622009-03-11 13:42:01 -04003858 WARN_ON(1);
3859 tracing_disabled = 1;
3860 }
3861 return ret;
3862 }
3863
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003864 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003865 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003866 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003867 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003868
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003869 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003870#endif /* CONFIG_TRACER_MAX_TRACE */
3871
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003872 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003873 set_buffer_entries(&tr->trace_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003874 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003875 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04003876
3877 return ret;
3878}
3879
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003880static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
3881 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003882{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07003883 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003884
3885 mutex_lock(&trace_types_lock);
3886
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003887 if (cpu_id != RING_BUFFER_ALL_CPUS) {
3888 /* make sure this cpu is enabled in the mask */
3889 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
3890 ret = -EINVAL;
3891 goto out;
3892 }
3893 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003894
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003895 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003896 if (ret < 0)
3897 ret = -ENOMEM;
3898
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003899out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003900 mutex_unlock(&trace_types_lock);
3901
3902 return ret;
3903}
3904
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003905
Steven Rostedt1852fcc2009-03-11 14:33:00 -04003906/**
3907 * tracing_update_buffers - used by tracing facility to expand ring buffers
3908 *
3909 * To save memory when tracing is never used on a system that has it
3910 * configured in, the ring buffers are set to a minimum size. But once
3911 * a user starts to use the tracing facility, they need to grow
3912 * to their default size.
3913 *
3914 * This function is to be called when a tracer is about to be used.
3915 */
3916int tracing_update_buffers(void)
3917{
3918 int ret = 0;
3919
Steven Rostedt1027fcb2009-03-12 11:33:20 -04003920 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04003921 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003922 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003923 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04003924 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04003925
3926 return ret;
3927}
3928
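/*
 * Expansion sketch (editor's addition): the first real use of tracing
 * grows the ring buffers from their minimal boot-time size to
 * trace_buf_size. Either of these writes (the second is an assumed
 * call path through the event enable code) ends up expanding them:
 *
 *	echo function > /sys/kernel/debug/tracing/current_tracer
 *	echo 1 > /sys/kernel/debug/tracing/events/sched/enable
 */
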
Steven Rostedt577b7852009-02-26 23:43:05 -05003929struct trace_option_dentry;
3930
3931static struct trace_option_dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003932create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05003933
3934static void
3935destroy_trace_option_files(struct trace_option_dentry *topts);
3936
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05003937/*
3938 * Used to clear out the tracer before deletion of an instance.
3939 * Must have trace_types_lock held.
3940 */
3941static void tracing_set_nop(struct trace_array *tr)
3942{
3943 if (tr->current_trace == &nop_trace)
3944 return;
3945
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05003946 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05003947
3948 if (tr->current_trace->reset)
3949 tr->current_trace->reset(tr);
3950
3951 tr->current_trace = &nop_trace;
3952}
3953
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003954static int tracing_set_tracer(struct trace_array *tr, const char *buf)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003955{
Steven Rostedt577b7852009-02-26 23:43:05 -05003956 static struct trace_option_dentry *topts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003957 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003958#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05003959 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003960#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01003961 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003962
Steven Rostedt1027fcb2009-03-12 11:33:20 -04003963 mutex_lock(&trace_types_lock);
3964
Steven Rostedt73c51622009-03-11 13:42:01 -04003965 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003966 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003967 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04003968 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01003969 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04003970 ret = 0;
3971 }
3972
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003973 for (t = trace_types; t; t = t->next) {
3974 if (strcmp(t->name, buf) == 0)
3975 break;
3976 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02003977 if (!t) {
3978 ret = -EINVAL;
3979 goto out;
3980 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003981 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003982 goto out;
3983
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003984 /* Some tracers are only allowed for the top level buffer */
3985 if (!trace_ok_for_array(t, tr)) {
3986 ret = -EINVAL;
3987 goto out;
3988 }
3989
Steven Rostedt9f029e82008-11-12 15:24:24 -05003990 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003991
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05003992 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003993
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003994 if (tr->current_trace->reset)
3995 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05003996
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003997 /* Current trace needs to be nop_trace before synchronize_sched */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003998 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05003999
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05004000#ifdef CONFIG_TRACER_MAX_TRACE
4001 had_max_tr = tr->allocated_snapshot;
Steven Rostedt34600f02013-01-22 13:35:11 -05004002
4003 if (had_max_tr && !t->use_max_tr) {
4004 /*
4005 * We need to make sure that the update_max_tr sees that
4006 * current_trace changed to nop_trace to keep it from
4007 * swapping the buffers after we resize it.
4008 * The update_max_tr is called with interrupts disabled,
4009 * so a synchronize_sched() is sufficient.
4010 */
4011 synchronize_sched();
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004012 free_snapshot(tr);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004013 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004014#endif
Steven Rostedt (Red Hat)f1b21c92014-01-14 12:33:33 -05004015 /* Currently, only the top instance has options */
4016 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
4017 destroy_trace_option_files(topts);
4018 topts = create_trace_option_files(tr, t);
4019 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004020
4021#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05004022 if (t->use_max_tr && !had_max_tr) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004023 ret = alloc_snapshot(tr);
Hiraku Toyookad60da502012-10-17 11:56:16 +09004024 if (ret < 0)
4025 goto out;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004026 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004027#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05004028
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004029 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004030 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004031 if (ret)
4032 goto out;
4033 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004034
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004035 tr->current_trace = t;
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004036 tr->current_trace->enabled++;
Steven Rostedt9f029e82008-11-12 15:24:24 -05004037 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004038 out:
4039 mutex_unlock(&trace_types_lock);
4040
Peter Zijlstrad9e54072008-11-01 19:57:37 +01004041 return ret;
4042}
4043
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004044static ssize_t
4045tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4046 size_t cnt, loff_t *ppos)
4047{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004048 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08004049 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004050 int i;
4051 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004052 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004053
Steven Rostedt60063a62008-10-28 10:44:24 -04004054 ret = cnt;
4055
Li Zefanee6c2c12009-09-18 14:06:47 +08004056 if (cnt > MAX_TRACER_SIZE)
4057 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004058
4059 if (copy_from_user(&buf, ubuf, cnt))
4060 return -EFAULT;
4061
4062 buf[cnt] = 0;
4063
4064 /* strip trailing whitespace. */
4065 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4066 buf[i] = 0;
4067
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004068 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004069 if (err)
4070 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004071
Jiri Olsacf8517c2009-10-23 19:36:16 -04004072 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004073
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004074 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004075}
4076
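/*
 * Usage sketch (editor's addition): tracing_set_trace_write() backs
 * the "current_tracer" file; the whitespace stripping above is what
 * lets a plain echo (which appends a newline) select a tracer:
 *
 *	echo nop > /sys/kernel/debug/tracing/current_tracer
 *	cat /sys/kernel/debug/tracing/current_tracer
 */
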
4077static ssize_t
4078tracing_max_lat_read(struct file *filp, char __user *ubuf,
4079 size_t cnt, loff_t *ppos)
4080{
4081 unsigned long *ptr = filp->private_data;
4082 char buf[64];
4083 int r;
4084
Steven Rostedtcffae432008-05-12 21:21:00 +02004085 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004086 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02004087 if (r > sizeof(buf))
4088 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004089 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004090}
4091
4092static ssize_t
4093tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4094 size_t cnt, loff_t *ppos)
4095{
Hannes Eder5e398412009-02-10 19:44:34 +01004096 unsigned long *ptr = filp->private_data;
Hannes Eder5e398412009-02-10 19:44:34 +01004097 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004098 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004099
Peter Huewe22fe9b52011-06-07 21:58:27 +02004100 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4101 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004102 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004103
4104 *ptr = val * 1000;
4105
4106 return cnt;
4107}
4108
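/*
 * Usage sketch (editor's addition, file name assumed from the users
 * of these handlers): they back "tracing_max_latency". Reads report
 * microseconds, and the write side stores val * 1000 so the value is
 * kept in nanoseconds internally:
 *
 *	cat /sys/kernel/debug/tracing/tracing_max_latency
 *	echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 */
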
Steven Rostedtb3806b42008-05-12 21:20:46 +02004109static int tracing_open_pipe(struct inode *inode, struct file *filp)
4110{
Oleg Nesterov15544202013-07-23 17:25:57 +02004111 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004112 struct trace_iterator *iter;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004113 int ret = 0;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004114
4115 if (tracing_disabled)
4116 return -ENODEV;
4117
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004118 if (trace_array_get(tr) < 0)
4119 return -ENODEV;
4120
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004121 mutex_lock(&trace_types_lock);
4122
Steven Rostedtb3806b42008-05-12 21:20:46 +02004123 /* create a buffer to store the information to pass to userspace */
4124 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004125 if (!iter) {
4126 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004127 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004128 goto out;
4129 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004130
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004131 /*
4132 * We make a copy of the current tracer to avoid concurrent
4133 * changes to it while we are reading.
4134 */
4135 iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
4136 if (!iter->trace) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004137 ret = -ENOMEM;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004138 goto fail;
4139 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004140 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004141
4142 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4143 ret = -ENOMEM;
4144 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10304145 }
4146
Steven Rostedta3097202008-11-07 22:36:02 -05004147 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10304148 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05004149
Steven Rostedt112f38a72009-06-01 15:16:05 -04004150 if (trace_flags & TRACE_ITER_LATENCY_FMT)
4151 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4152
David Sharp8be07092012-11-13 12:18:22 -08004153 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09004154 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08004155 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4156
Oleg Nesterov15544202013-07-23 17:25:57 +02004157 iter->tr = tr;
4158 iter->trace_buffer = &tr->trace_buffer;
4159 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004160 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004161 filp->private_data = iter;
4162
Steven Rostedt107bad82008-05-12 21:21:01 +02004163 if (iter->trace->pipe_open)
4164 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02004165
Arnd Bergmannb4447862010-07-07 23:40:11 +02004166 nonseekable_open(inode, filp);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004167out:
4168 mutex_unlock(&trace_types_lock);
4169 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004170
4171fail:
4172 kfree(iter->trace);
4173 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004174 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004175 mutex_unlock(&trace_types_lock);
4176 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004177}
4178
4179static int tracing_release_pipe(struct inode *inode, struct file *file)
4180{
4181 struct trace_iterator *iter = file->private_data;
Oleg Nesterov15544202013-07-23 17:25:57 +02004182 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004183
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004184 mutex_lock(&trace_types_lock);
4185
Steven Rostedt29bf4a52009-12-09 12:37:43 -05004186 if (iter->trace->pipe_close)
Steven Rostedtc521efd2009-12-07 09:06:24 -05004187 iter->trace->pipe_close(iter);
4188
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004189 mutex_unlock(&trace_types_lock);
4190
Rusty Russell44623442009-01-01 10:12:23 +10304191 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004192 mutex_destroy(&iter->mutex);
4193 kfree(iter->trace);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004194 kfree(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004195
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004196 trace_array_put(tr);
4197
Steven Rostedtb3806b42008-05-12 21:20:46 +02004198 return 0;
4199}
4200
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004201static unsigned int
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004202trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004203{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004204 /* Iterators are static; they should be filled or empty */
4205 if (trace_buffer_iter(iter, iter->cpu_file))
4206 return POLLIN | POLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004207
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004208 if (trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004209 /*
4210 * Always select as readable when in blocking mode
4211 */
4212 return POLLIN | POLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004213 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004214 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004215 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004216}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004217
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004218static unsigned int
4219tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4220{
4221 struct trace_iterator *iter = filp->private_data;
4222
4223 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004224}
4225
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01004226/*
4227 * This is a makeshift waitqueue.
4228 * A tracer might use this callback in some rare cases:
4229 *
4230 * 1) the current tracer might hold the runqueue lock when it wakes up
4231 * a reader, hence a deadlock (sched, function, and function graph tracers)
4232 * 2) the function tracers trace all functions, and we don't want
4233 * the overhead of calling wake_up and friends
4234 * (and of tracing them too)
4235 *
4236 * Anyway, this is a really primitive wakeup.
4237 */
4238void poll_wait_pipe(struct trace_iterator *iter)
4239{
4240 set_current_state(TASK_INTERRUPTIBLE);
4241 /* sleep for 100 msecs, and try again. */
4242 schedule_timeout(HZ / 10);
4243}
4244
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004245/* Must be called with trace_types_lock mutex held. */
4246static int tracing_wait_pipe(struct file *filp)
4247{
4248 struct trace_iterator *iter = filp->private_data;
4249
4250 while (trace_empty(iter)) {
4251
4252 if ((filp->f_flags & O_NONBLOCK)) {
4253 return -EAGAIN;
4254 }
4255
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004256 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004257
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01004258 iter->trace->wait_pipe(iter);
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004259
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004260 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004261
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01004262 if (signal_pending(current))
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004263 return -EINTR;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004264
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004265 /*
Liu Bo250bfd32013-01-14 10:54:11 +08004266 * We block until we read something and tracing is disabled.
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004267 * We still block if tracing is disabled, but we have never
4268 * read anything. This allows a user to cat this file, and
4269 * then enable tracing. But after we have read something,
4270 * we give an EOF when tracing is again disabled.
4271 *
4272 * iter->pos will be 0 if we haven't read anything.
4273 */
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04004274 if (!tracing_is_on() && iter->pos)
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004275 break;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004276 }
4277
4278 return 1;
4279}
4280
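/*
 * Userspace sketch (editor's addition, hypothetical snippet):
 * tracing_wait_pipe() is what makes an O_NONBLOCK reader of
 * "trace_pipe" see -EAGAIN instead of sleeping, and a signal while
 * blocked turns into -EINTR:
 *
 *	int fd = open("/sys/kernel/debug/tracing/trace_pipe",
 *		      O_RDONLY | O_NONBLOCK);
 *	char buf[4096];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *	   - n < 0 with errno == EAGAIN means nothing is buffered yet
 */
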
Steven Rostedtb3806b42008-05-12 21:20:46 +02004281/*
4282 * Consumer reader.
4283 */
4284static ssize_t
4285tracing_read_pipe(struct file *filp, char __user *ubuf,
4286 size_t cnt, loff_t *ppos)
4287{
4288 struct trace_iterator *iter = filp->private_data;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004289 struct trace_array *tr = iter->tr;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004290 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004291
4292 /* return any leftover data */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004293 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4294 if (sret != -EBUSY)
4295 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004296
Steven Rostedtf9520752009-03-02 14:04:40 -05004297 trace_seq_init(&iter->seq);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004298
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004299 /* copy the tracer to avoid using a global lock all around */
Steven Rostedt107bad82008-05-12 21:21:01 +02004300 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004301 if (unlikely(iter->trace->name != tr->current_trace->name))
4302 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004303 mutex_unlock(&trace_types_lock);
4304
4305 /*
4306 * Avoid more than one consumer on a single file descriptor.
4307 * This is just a matter of trace coherency; the ring buffer itself
4308 * is protected.
4309 */
4310 mutex_lock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02004311 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004312 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4313 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02004314 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02004315 }
4316
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004317waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004318 sret = tracing_wait_pipe(filp);
4319 if (sret <= 0)
4320 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004321
4322 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004323 if (trace_empty(iter)) {
4324 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02004325 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004326 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004327
4328 if (cnt >= PAGE_SIZE)
4329 cnt = PAGE_SIZE - 1;
4330
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004331 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004332 memset(&iter->seq, 0,
4333 sizeof(struct trace_iterator) -
4334 offsetof(struct trace_iterator, seq));
Andrew Vagined5467d2013-08-02 21:16:43 +04004335 cpumask_clear(iter->started);
Steven Rostedt4823ed72008-05-12 21:21:01 +02004336 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004337
Lai Jiangshan4f535962009-05-18 19:35:34 +08004338 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004339 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05004340 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004341 enum print_line_t ret;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004342 int len = iter->seq.len;
4343
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004344 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004345 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02004346 /* don't print partial lines */
4347 iter->seq.len = len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004348 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004349 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01004350 if (ret != TRACE_TYPE_NO_CONSUME)
4351 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004352
4353 if (iter->seq.len >= cnt)
4354 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01004355
4356 /*
4357 * Setting the full flag means we reached the trace_seq buffer
4358 * size and we should have left by the partial output condition above.
4359 * One of the trace_seq_* functions is not being used properly.
4360 */
4361 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4362 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004363 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004364 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004365 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02004366
Steven Rostedtb3806b42008-05-12 21:20:46 +02004367 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004368 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4369 if (iter->seq.readpos >= iter->seq.len)
Steven Rostedtf9520752009-03-02 14:04:40 -05004370 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004371
4372 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004373 * If there was nothing to send to the user, in spite of consuming trace
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004374 * entries, go back to wait for more entries.
4375 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004376 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004377 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004378
Steven Rostedt107bad82008-05-12 21:21:01 +02004379out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004380 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02004381
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004382 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004383}
4384
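/*
 * Usage note (editor's addition): unlike the "trace" file, reads from
 * "trace_pipe" consume the entries they return, so data read once is
 * gone from the ring buffer:
 *
 *	cat /sys/kernel/debug/tracing/trace_pipe
 */
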
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004385static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4386 unsigned int idx)
4387{
4388 __free_page(spd->pages[idx]);
4389}
4390
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08004391static const struct pipe_buf_operations tracing_pipe_buf_ops = {
Steven Rostedt34cd4992009-02-09 12:06:29 -05004392 .can_merge = 0,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004393 .confirm = generic_pipe_buf_confirm,
Al Viro92fdd982014-01-17 07:53:39 -05004394 .release = generic_pipe_buf_release,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004395 .steal = generic_pipe_buf_steal,
4396 .get = generic_pipe_buf_get,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004397};
4398
Steven Rostedt34cd4992009-02-09 12:06:29 -05004399static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01004400tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004401{
4402 size_t count;
4403 int ret;
4404
4405 /* Seq buffer is page-sized, exactly what we need. */
4406 for (;;) {
4407 count = iter->seq.len;
4408 ret = print_trace_line(iter);
4409 count = iter->seq.len - count;
4410 if (rem < count) {
4411 rem = 0;
4412 iter->seq.len -= count;
4413 break;
4414 }
4415 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4416 iter->seq.len -= count;
4417 break;
4418 }
4419
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08004420 if (ret != TRACE_TYPE_NO_CONSUME)
4421 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05004422 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05004423 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05004424 rem = 0;
4425 iter->ent = NULL;
4426 break;
4427 }
4428 }
4429
4430 return rem;
4431}
4432
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004433static ssize_t tracing_splice_read_pipe(struct file *filp,
4434 loff_t *ppos,
4435 struct pipe_inode_info *pipe,
4436 size_t len,
4437 unsigned int flags)
4438{
Jens Axboe35f3d142010-05-20 10:43:18 +02004439 struct page *pages_def[PIPE_DEF_BUFFERS];
4440 struct partial_page partial_def[PIPE_DEF_BUFFERS];
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004441 struct trace_iterator *iter = filp->private_data;
4442 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02004443 .pages = pages_def,
4444 .partial = partial_def,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004445 .nr_pages = 0, /* This gets updated below. */
Eric Dumazet047fe362012-06-12 15:24:40 +02004446 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004447 .flags = flags,
4448 .ops = &tracing_pipe_buf_ops,
4449 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004450 };
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004451 struct trace_array *tr = iter->tr;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004452 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004453 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004454 unsigned int i;
4455
Jens Axboe35f3d142010-05-20 10:43:18 +02004456 if (splice_grow_spd(pipe, &spd))
4457 return -ENOMEM;
4458
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004459 /* copy the tracer to avoid using a global lock all around */
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004460 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004461 if (unlikely(iter->trace->name != tr->current_trace->name))
4462 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004463 mutex_unlock(&trace_types_lock);
4464
4465 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004466
4467 if (iter->trace->splice_read) {
4468 ret = iter->trace->splice_read(iter, filp,
4469 ppos, pipe, len, flags);
4470 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004471 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004472 }
4473
4474 ret = tracing_wait_pipe(filp);
4475 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004476 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004477
Jason Wessel955b61e2010-08-05 09:22:23 -05004478 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004479 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004480 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004481 }
4482
Lai Jiangshan4f535962009-05-18 19:35:34 +08004483 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004484 trace_access_lock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004485
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004486 /* Fill as many pages as possible. */
Al Viroa786c062014-04-11 12:01:03 -04004487 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
Jens Axboe35f3d142010-05-20 10:43:18 +02004488 spd.pages[i] = alloc_page(GFP_KERNEL);
4489 if (!spd.pages[i])
Steven Rostedt34cd4992009-02-09 12:06:29 -05004490 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004491
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01004492 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004493
4494 /* Copy the data into the page, so we can start over. */
4495 ret = trace_seq_to_buffer(&iter->seq,
Jens Axboe35f3d142010-05-20 10:43:18 +02004496 page_address(spd.pages[i]),
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004497 iter->seq.len);
4498 if (ret < 0) {
Jens Axboe35f3d142010-05-20 10:43:18 +02004499 __free_page(spd.pages[i]);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004500 break;
4501 }
Jens Axboe35f3d142010-05-20 10:43:18 +02004502 spd.partial[i].offset = 0;
4503 spd.partial[i].len = iter->seq.len;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004504
Steven Rostedtf9520752009-03-02 14:04:40 -05004505 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004506 }
4507
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004508 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004509 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004510 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004511
4512 spd.nr_pages = i;
4513
Jens Axboe35f3d142010-05-20 10:43:18 +02004514 ret = splice_to_pipe(pipe, &spd);
4515out:
Eric Dumazet047fe362012-06-12 15:24:40 +02004516 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02004517 return ret;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004518
Steven Rostedt34cd4992009-02-09 12:06:29 -05004519out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004520 mutex_unlock(&iter->mutex);
Jens Axboe35f3d142010-05-20 10:43:18 +02004521 goto out;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004522}
4523
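/*
 * Userspace sketch (editor's addition, hypothetical fds): the splice
 * path above moves trace data into a pipe without bouncing it through
 * a user buffer:
 *
 *	int fd = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);
 *	int p[2];
 *	pipe(p);
 *	splice(fd, NULL, p[1], NULL, 65536, 0);
 */
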
Steven Rostedta98a3c32008-05-12 21:20:59 +02004524static ssize_t
4525tracing_entries_read(struct file *filp, char __user *ubuf,
4526 size_t cnt, loff_t *ppos)
4527{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004528 struct inode *inode = file_inode(filp);
4529 struct trace_array *tr = inode->i_private;
4530 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004531 char buf[64];
4532 int r = 0;
4533 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004534
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004535 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004536
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004537 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004538 int cpu, buf_size_same;
4539 unsigned long size;
4540
4541 size = 0;
4542 buf_size_same = 1;
4543 /* check if all cpu sizes are same */
4544 for_each_tracing_cpu(cpu) {
4545 /* fill in the size from first enabled cpu */
4546 if (size == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004547 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4548 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004549 buf_size_same = 0;
4550 break;
4551 }
4552 }
4553
4554 if (buf_size_same) {
4555 if (!ring_buffer_expanded)
4556 r = sprintf(buf, "%lu (expanded: %lu)\n",
4557 size >> 10,
4558 trace_buf_size >> 10);
4559 else
4560 r = sprintf(buf, "%lu\n", size >> 10);
4561 } else
4562 r = sprintf(buf, "X\n");
4563 } else
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004564 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004565
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004566 mutex_unlock(&trace_types_lock);
4567
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004568 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4569 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004570}
4571
4572static ssize_t
4573tracing_entries_write(struct file *filp, const char __user *ubuf,
4574 size_t cnt, loff_t *ppos)
4575{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004576 struct inode *inode = file_inode(filp);
4577 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004578 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004579 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004580
Peter Huewe22fe9b52011-06-07 21:58:27 +02004581 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4582 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004583 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004584
4585 /* must have at least 1 entry */
4586 if (!val)
4587 return -EINVAL;
4588
Steven Rostedt1696b2b2008-11-13 00:09:35 -05004589 /* value is in KB */
4590 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004591 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004592 if (ret < 0)
4593 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004594
Jiri Olsacf8517c2009-10-23 19:36:16 -04004595 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004596
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004597 return cnt;
4598}
Steven Rostedtbf5e6512008-11-10 21:46:00 -05004599
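/*
 * Usage sketch (editor's addition, per_cpu path assumed): these
 * handlers back "buffer_size_kb". Values are in KiB and per cpu, and
 * tracing_get_cpu() lets the per_cpu variants resize one ring buffer:
 *
 *	echo 4096 > /sys/kernel/debug/tracing/buffer_size_kb
 *	echo 1024 > /sys/kernel/debug/tracing/per_cpu/cpu0/buffer_size_kb
 *	cat /sys/kernel/debug/tracing/buffer_size_kb
 */
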
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004600static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004601tracing_total_entries_read(struct file *filp, char __user *ubuf,
4602 size_t cnt, loff_t *ppos)
4603{
4604 struct trace_array *tr = filp->private_data;
4605 char buf[64];
4606 int r, cpu;
4607 unsigned long size = 0, expanded_size = 0;
4608
4609 mutex_lock(&trace_types_lock);
4610 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004611 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004612 if (!ring_buffer_expanded)
4613 expanded_size += trace_buf_size >> 10;
4614 }
4615 if (ring_buffer_expanded)
4616 r = sprintf(buf, "%lu\n", size);
4617 else
4618 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
4619 mutex_unlock(&trace_types_lock);
4620
4621 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4622}
4623
4624static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004625tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
4626 size_t cnt, loff_t *ppos)
4627{
4628 /*
4629 * There is no need to read what the user has written; this function
4630 * is just here to make sure that there is no error when "echo" is used
4631 */
4632
4633 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004634
4635 return cnt;
4636}
4637
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004638static int
4639tracing_free_buffer_release(struct inode *inode, struct file *filp)
4640{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004641 struct trace_array *tr = inode->i_private;
4642
Steven Rostedtcf30cf62011-06-14 22:44:07 -04004643 /* disable tracing? */
4644 if (trace_flags & TRACE_ITER_STOP_ON_FREE)
Alexander Z Lam711e1242013-08-02 18:36:15 -07004645 tracer_tracing_off(tr);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004646 /* resize the ring buffer to 0 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004647 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004648
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004649 trace_array_put(tr);
4650
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004651 return 0;
4652}
4653
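/*
 * Usage sketch (editor's addition): writing anything to "free_buffer"
 * and closing it shrinks the ring buffer to zero; with the
 * stop-on-free option set, tracing is switched off first:
 *
 *	echo > /sys/kernel/debug/tracing/free_buffer
 */
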
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004654static ssize_t
4655tracing_mark_write(struct file *filp, const char __user *ubuf,
4656 size_t cnt, loff_t *fpos)
4657{
Steven Rostedtd696b582011-09-22 11:50:27 -04004658 unsigned long addr = (unsigned long)ubuf;
Alexander Z Lam2d716192013-07-01 15:31:24 -07004659 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04004660 struct ring_buffer_event *event;
4661 struct ring_buffer *buffer;
4662 struct print_entry *entry;
4663 unsigned long irq_flags;
4664 struct page *pages[2];
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004665 void *map_page[2];
Steven Rostedtd696b582011-09-22 11:50:27 -04004666 int nr_pages = 1;
4667 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04004668 int offset;
4669 int size;
4670 int len;
4671 int ret;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004672 int i;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004673
Steven Rostedtc76f0692008-11-07 22:36:02 -05004674 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004675 return -EINVAL;
4676
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07004677 if (!(trace_flags & TRACE_ITER_MARKERS))
4678 return -EINVAL;
4679
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004680 if (cnt > TRACE_BUF_SIZE)
4681 cnt = TRACE_BUF_SIZE;
4682
Steven Rostedtd696b582011-09-22 11:50:27 -04004683 /*
4684 * Userspace is injecting traces into the kernel trace buffer.
4685 * We want to be as non-intrusive as possible.
4686 * To do so, we do not want to allocate any special buffers
4687 * or take any locks, but instead write the userspace data
4688 * straight into the ring buffer.
4689 *
4690 * First we need to pin the userspace buffer into memory,
4691 * which it most likely already is, because the task just referenced it.
4692 * But there's no guarantee that it is. By using get_user_pages_fast()
4693 * and kmap_atomic/kunmap_atomic() we can get access to the
4694 * pages directly. We then write the data directly into the
4695 * ring buffer.
4696 */
4697 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004698
Steven Rostedtd696b582011-09-22 11:50:27 -04004699 /* check if we cross pages */
4700 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
4701 nr_pages = 2;
4702
4703 offset = addr & (PAGE_SIZE - 1);
4704 addr &= PAGE_MASK;
4705
4706 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
4707 if (ret < nr_pages) {
4708 while (--ret >= 0)
4709 put_page(pages[ret]);
4710 written = -EFAULT;
4711 goto out;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004712 }
4713
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004714 for (i = 0; i < nr_pages; i++)
4715 map_page[i] = kmap_atomic(pages[i]);
Steven Rostedtd696b582011-09-22 11:50:27 -04004716
4717 local_save_flags(irq_flags);
4718 size = sizeof(*entry) + cnt + 2; /* possible \n added */
Alexander Z Lam2d716192013-07-01 15:31:24 -07004719 buffer = tr->trace_buffer.buffer;
Steven Rostedtd696b582011-09-22 11:50:27 -04004720 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
4721 irq_flags, preempt_count());
4722 if (!event) {
4723 /* Ring buffer disabled, return as if not open for write */
4724 written = -EBADF;
4725 goto out_unlock;
4726 }
4727
4728 entry = ring_buffer_event_data(event);
4729 entry->ip = _THIS_IP_;
4730
4731 if (nr_pages == 2) {
4732 len = PAGE_SIZE - offset;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004733 memcpy(&entry->buf, map_page[0] + offset, len);
4734 memcpy(&entry->buf[len], map_page[1], cnt - len);
Steven Rostedtd696b582011-09-22 11:50:27 -04004735 } else
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004736 memcpy(&entry->buf, map_page[0] + offset, cnt);
Steven Rostedtd696b582011-09-22 11:50:27 -04004737
4738 if (entry->buf[cnt - 1] != '\n') {
4739 entry->buf[cnt] = '\n';
4740 entry->buf[cnt + 1] = '\0';
4741 } else
4742 entry->buf[cnt] = '\0';
4743
Steven Rostedt7ffbd482012-10-11 12:14:25 -04004744 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04004745
4746 written = cnt;
4747
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02004748 *fpos += written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004749
Steven Rostedtd696b582011-09-22 11:50:27 -04004750 out_unlock:
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004751 for (i = 0; i < nr_pages; i++){
4752 kunmap_atomic(map_page[i]);
4753 put_page(pages[i]);
4754 }
Steven Rostedtd696b582011-09-22 11:50:27 -04004755 out:
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02004756 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004757}
4758
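/*
 * Usage sketch (editor's addition): tracing_mark_write() backs the
 * "trace_marker" file, letting userspace annotate the trace with a
 * plain write; the text appears as a print entry stamped at _THIS_IP_:
 *
 *	echo "hit checkpoint A" > /sys/kernel/debug/tracing/trace_marker
 */
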
Li Zefan13f16d22009-12-08 11:16:11 +08004759static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08004760{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004761 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08004762 int i;
4763
4764 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08004765 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08004766 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004767 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
4768 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08004769 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08004770
Li Zefan13f16d22009-12-08 11:16:11 +08004771 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08004772}
4773
Steven Rostedte1e232c2014-02-10 23:38:46 -05004774static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
Zhaolei5079f322009-08-25 16:12:56 +08004775{
Zhaolei5079f322009-08-25 16:12:56 +08004776 int i;
4777
Zhaolei5079f322009-08-25 16:12:56 +08004778 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
4779 if (strcmp(trace_clocks[i].name, clockstr) == 0)
4780 break;
4781 }
4782 if (i == ARRAY_SIZE(trace_clocks))
4783 return -EINVAL;
4784
Zhaolei5079f322009-08-25 16:12:56 +08004785 mutex_lock(&trace_types_lock);
4786
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004787 tr->clock_id = i;
4788
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004789 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08004790
David Sharp60303ed2012-10-11 16:27:52 -07004791 /*
4792 * New clock may not be consistent with the previous clock.
4793 * Reset the buffer so that it doesn't have incomparable timestamps.
4794 */
Alexander Z Lam94571582013-08-02 18:36:16 -07004795 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004796
4797#ifdef CONFIG_TRACER_MAX_TRACE
4798 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
4799 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
Alexander Z Lam94571582013-08-02 18:36:16 -07004800 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004801#endif
David Sharp60303ed2012-10-11 16:27:52 -07004802
Zhaolei5079f322009-08-25 16:12:56 +08004803 mutex_unlock(&trace_types_lock);
4804
Steven Rostedte1e232c2014-02-10 23:38:46 -05004805 return 0;
4806}
4807
static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *fpos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	const char *clockstr;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	clockstr = strstrip(buf);

	ret = tracing_set_clock(tr, clockstr);
	if (ret)
		return ret;

	*fpos += cnt;

	return cnt;
}

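/*
 * Sketch of the trace_clock interface implemented above (the set of
 * clocks shown depends on the kernel configuration):
 *
 *	# cat trace_clock
 *	[local] global counter uptime perf
 *	# echo global > trace_clock
 *
 * The bracketed entry is the clock currently in use; writing a name
 * switches clocks and resets the buffers as described above.
 */
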
static int tracing_clock_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr))
		return -ENODEV;

	ret = single_open(file, tracing_clock_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

struct ftrace_buffer_info {
	struct trace_iterator	iter;
	void			*spare;
	unsigned int		read;
};

#ifdef CONFIG_TRACER_SNAPSHOT
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		ret = -ENOMEM;
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
			goto out;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
			goto out;
		}
		ret = 0;

		iter->tr = tr;
		iter->trace_buffer = &tr->max_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		m->private = iter;
		file->private_data = m;
	}
out:
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	switch (val) {
	case 0:
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (!tr->allocated_snapshot) {
			ret = alloc_snapshot(tr);
			if (ret < 0)
				break;
		}
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id());
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}

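/*
 * Summary of the values accepted by the "snapshot" file above:
 *
 *	echo 0 > snapshot	- free the snapshot buffer (all-CPU file only)
 *	echo 1 > snapshot	- allocate the buffer if needed and take a
 *				  snapshot by swapping it with the live buffer
 *	echo 2 > snapshot	- clear the snapshot buffer without swapping
 *				  (any value other than 0 or 1 does this)
 */
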
static int tracing_snapshot_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	int ret;

	ret = tracing_release(inode, file);

	if (file->f_mode & FMODE_READ)
		return ret;

	/* If write only, the seq_file is just a stub */
	if (m)
		kfree(m->private);
	kfree(m);

	return 0;
}

static int tracing_buffers_open(struct inode *inode, struct file *filp);
static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
				    size_t count, loff_t *ppos);
static int tracing_buffers_release(struct inode *inode, struct file *file);
static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);

static int snapshot_raw_open(struct inode *inode, struct file *filp)
{
	struct ftrace_buffer_info *info;
	int ret;

	ret = tracing_buffers_open(inode, filp);
	if (ret < 0)
		return ret;

	info = filp->private_data;

	if (info->iter.trace->use_max_tr) {
		tracing_buffers_release(inode, filp);
		return -EBUSY;
	}

	info->iter.snapshot = true;
	info->iter.trace_buffer = &info->iter.tr->max_buffer;

	return ret;
}

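/*
 * snapshot_raw reuses the trace_pipe_raw read and splice paths below,
 * but points the iterator at max_buffer, so user space can pull the
 * snapshot out in raw ring-buffer page format.
 */
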
#endif /* CONFIG_TRACER_SNAPSHOT */


static const struct file_operations tracing_max_lat_fops = {
	.open = tracing_open_generic,
	.read = tracing_max_lat_read,
	.write = tracing_max_lat_write,
	.llseek = generic_file_llseek,
};

static const struct file_operations set_tracer_fops = {
	.open = tracing_open_generic,
	.read = tracing_set_trace_read,
	.write = tracing_set_trace_write,
	.llseek = generic_file_llseek,
};

static const struct file_operations tracing_pipe_fops = {
	.open = tracing_open_pipe,
	.poll = tracing_poll_pipe,
	.read = tracing_read_pipe,
	.splice_read = tracing_splice_read_pipe,
	.release = tracing_release_pipe,
	.llseek = no_llseek,
};

static const struct file_operations tracing_entries_fops = {
	.open = tracing_open_generic_tr,
	.read = tracing_entries_read,
	.write = tracing_entries_write,
	.llseek = generic_file_llseek,
	.release = tracing_release_generic_tr,
};

static const struct file_operations tracing_total_entries_fops = {
	.open = tracing_open_generic_tr,
	.read = tracing_total_entries_read,
	.llseek = generic_file_llseek,
	.release = tracing_release_generic_tr,
};

static const struct file_operations tracing_free_buffer_fops = {
	.open = tracing_open_generic_tr,
	.write = tracing_free_buffer_write,
	.release = tracing_free_buffer_release,
};

static const struct file_operations tracing_mark_fops = {
	.open = tracing_open_generic_tr,
	.write = tracing_mark_write,
	.llseek = generic_file_llseek,
	.release = tracing_release_generic_tr,
};

static const struct file_operations trace_clock_fops = {
	.open = tracing_clock_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = tracing_single_release_tr,
	.write = tracing_clock_write,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
	.open = tracing_snapshot_open,
	.read = seq_read,
	.write = tracing_snapshot_write,
	.llseek = tracing_lseek,
	.release = tracing_snapshot_release,
};

static const struct file_operations snapshot_raw_fops = {
	.open = snapshot_raw_open,
	.read = tracing_buffers_read,
	.release = tracing_buffers_release,
	.splice_read = tracing_buffers_splice_read,
	.llseek = no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */

static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct ftrace_buffer_info *info;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	mutex_lock(&trace_types_lock);

	info->iter.tr = tr;
	info->iter.cpu_file = tracing_get_cpu(inode);
	info->iter.trace = tr->current_trace;
	info->iter.trace_buffer = &tr->trace_buffer;
	info->spare = NULL;
	/* Force reading ring buffer for first read */
	info->read = (unsigned int)-1;

	filp->private_data = info;

	mutex_unlock(&trace_types_lock);

	ret = nonseekable_open(inode, filp);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static unsigned int
tracing_buffers_poll(struct file *filp, poll_table *poll_table)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;

	return trace_poll(iter, filp, poll_table);
}

static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;
	ssize_t ret;
	ssize_t size;

	if (!count)
		return 0;

	mutex_lock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
		size = -EBUSY;
		goto out_unlock;
	}
#endif

	if (!info->spare)
		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
							  iter->cpu_file);
	size = -ENOMEM;
	if (!info->spare)
		goto out_unlock;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

 again:
	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
				    &info->spare,
				    count,
				    iter->cpu_file, 0);
	trace_access_unlock(iter->cpu_file);

	if (ret < 0) {
		if (trace_empty(iter)) {
			if ((filp->f_flags & O_NONBLOCK)) {
				size = -EAGAIN;
				goto out_unlock;
			}
			mutex_unlock(&trace_types_lock);
			iter->trace->wait_pipe(iter);
			mutex_lock(&trace_types_lock);
			if (signal_pending(current)) {
				size = -EINTR;
				goto out_unlock;
			}
			goto again;
		}
		size = 0;
		goto out_unlock;
	}

	info->read = 0;
 read:
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size) {
		size = -EFAULT;
		goto out_unlock;
	}
	size -= ret;

	*ppos += size;
	info->read += size;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return size;
}

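/*
 * The above implements reads of per_cpu/cpuN/trace_pipe_raw: data is
 * handed out in ring-buffer page sized chunks through the single
 * "spare" page, so readers normally consume it a page at a time, e.g.
 * (a sketch, assuming debugfs at /sys/kernel/debug and 4k pages):
 *
 *	dd if=/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw \
 *	   of=cpu0.dat bs=4096
 */
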
static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	__trace_array_put(iter->tr);

	if (info->spare)
		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
	kfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}

struct buffer_ref {
	struct ring_buffer	*buffer;
	void			*page;
	int			ref;
};

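/*
 * A buffer_ref reference counts a ring buffer page handed off to the
 * pipe: every pipe_buffer holding the page takes a reference in
 * buffer_pipe_buf_get(), and the page goes back to the ring buffer
 * only when the last reference is dropped, either in
 * buffer_pipe_buf_release() or in buffer_spd_release() below.
 */
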
static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	buf->private = 0;
}

static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ref->ref++;
}

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge = 0,
	.confirm = generic_pipe_buf_confirm,
	.release = buffer_pipe_buf_release,
	.steal = generic_pipe_buf_steal,
	.get = buffer_pipe_buf_get,
};

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we errored out in filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	spd->partial[i].private = 0;
}

static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages = pages_def,
		.partial = partial_def,
		.nr_pages_max = PIPE_DEF_BUFFERS,
		.flags = flags,
		.ops = &buffer_pipe_buf_ops,
		.spd_release = buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, size, i;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}
#endif

	if (splice_grow_spd(pipe, &spd)) {
		ret = -ENOMEM;
		goto out;
	}

	if (*ppos & (PAGE_SIZE - 1)) {
		ret = -EINVAL;
		goto out;
	}

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE) {
			ret = -EINVAL;
			goto out;
		}
		len &= PAGE_MASK;
	}

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref)
			break;

		ref->ref = 1;
		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (!ref->page) {
			kfree(ref);
			break;
		}

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->page);
			kfree(ref);
			break;
		}

		/*
		 * Zero out any leftover data; this is going to
		 * user land.
		 */
		size = ring_buffer_page_len(ref->page);
		if (size < PAGE_SIZE)
			memset(ref->page + size, 0, PAGE_SIZE - size);

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
			ret = -EAGAIN;
			goto out;
		}
		mutex_unlock(&trace_types_lock);
		iter->trace->wait_pipe(iter);
		mutex_lock(&trace_types_lock);
		if (signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
	splice_shrink_spd(&spd);
out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

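/*
 * Unlike tracing_buffers_read(), the splice path above is copy-free:
 * whole ring-buffer pages are wrapped in buffer_refs and linked
 * straight into the pipe, so no intermediate copy to a spare page is
 * needed.
 */
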
static const struct file_operations tracing_buffers_fops = {
	.open = tracing_buffers_open,
	.read = tracing_buffers_read,
	.poll = tracing_buffers_poll,
	.release = tracing_buffers_release,
	.splice_read = tracing_buffers_splice_read,
	.llseek = no_llseek,
};

static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
				 t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				 ring_buffer_time_stamp(trace_buf->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);

	kfree(s);

	return count;
}

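/*
 * Rough shape of a read of per_cpu/cpuN/stats with a nanosecond clock
 * (the numbers are illustrative only):
 *
 *	entries: 129
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 6724
 *	oldest event ts:  2296.688767
 *	now ts:  2300.537040
 *	dropped events: 0
 *	read events: 107
 */
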
static const struct file_operations tracing_stats_fops = {
	.open = tracing_open_generic_tr,
	.read = tracing_stats_read,
	.llseek = generic_file_llseek,
	.release = tracing_release_generic_tr,
};

#ifdef CONFIG_DYNAMIC_FTRACE

int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
	int r;

	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
	buf[r++] = '\n';

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);

	return r;
}

static const struct file_operations tracing_dyn_info_fops = {
	.open = tracing_open_generic,
	.read = tracing_read_dyn_info,
	.llseek = generic_file_llseek,
};
#endif /* CONFIG_DYNAMIC_FTRACE */

#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	tracing_snapshot();
}

static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	unsigned long *count = (long *)data;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_snapshot();
}

static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	seq_printf(m, "snapshot");

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

static struct ftrace_probe_ops snapshot_probe_ops = {
	.func = ftrace_snapshot,
	.print = ftrace_snapshot_print,
};

static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func = ftrace_count_snapshot,
	.print = ftrace_snapshot_print,
};

static int
ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	if (ret >= 0)
		alloc_snapshot(&global_trace);

	return ret < 0 ? ret : 0;
}

static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name = "snapshot",
	.func = ftrace_trace_snapshot_callback,
};

static __init int register_snapshot_cmd(void)
{
	return register_ftrace_command(&ftrace_snapshot_cmd);
}
#else
static inline __init int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */

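/*
 * The "snapshot" command registered above is used through
 * set_ftrace_filter, e.g.:
 *
 *	echo 'schedule:snapshot' > set_ftrace_filter
 *	echo 'schedule:snapshot:5' > set_ftrace_filter
 *	echo '!schedule:snapshot' >> set_ftrace_filter
 *
 * The first form snapshots on every hit of schedule(), the second only
 * on the first five hits, and the '!' form removes the probe again.
 */
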
struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
{
	if (tr->dir)
		return tr->dir;

	if (!debugfs_initialized())
		return NULL;

	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		tr->dir = debugfs_create_dir("tracing", NULL);

	if (!tr->dir)
		pr_warn_once("Could not create debugfs directory 'tracing'\n");

	return tr->dir;
}

struct dentry *tracing_init_dentry(void)
{
	return tracing_init_dentry_tr(&global_trace);
}

static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
	struct dentry *d_tracer;

	if (tr->percpu_dir)
		return tr->percpu_dir;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);

	WARN_ONCE(!tr->percpu_dir,
		  "Could not create debugfs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;
}

static struct dentry *
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
		      void *data, long cpu, const struct file_operations *fops)
{
	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);

	if (ret) /* See tracing_get_cpu() */
		ret->d_inode->i_cdev = (void *)(cpu + 1);
	return ret;
}

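/*
 * Storing cpu + 1 in i_cdev lets tracing_get_cpu() distinguish "no
 * per-cpu file" (i_cdev == NULL, treated as RING_BUFFER_ALL_CPUS) from
 * a file for cpu 0, which would otherwise also look like NULL.
 */
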
static void
tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
				tr, cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_cpu_file("trace", 0644, d_cpu,
				tr, cpu, &tracing_fops);

	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
				tr, cpu, &tracing_buffers_fops);

	trace_create_cpu_file("stats", 0444, d_cpu,
				tr, cpu, &tracing_stats_fops);

	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
				tr, cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_cpu_file("snapshot", 0644, d_cpu,
				tr, cpu, &snapshot_fops);

	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
				tr, cpu, &snapshot_raw_fops);
#endif
}

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};

static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
	.llseek = generic_file_llseek,
};

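/*
 * Tracer specific flags appear as files under the "options" directory,
 * for example (a sketch with the function_graph tracer active):
 *
 *	echo 0 > options/funcgraph-duration
 *	echo 1 > options/funcgraph-duration
 *
 * Reading a file returns "0\n" or "1\n" for the current state.
 */
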
static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	long index = (long)filp->private_data;
	char *buf;

	if (trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	struct trace_array *tr = &global_trace;
	long index = (long)filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek = generic_file_llseek,
};

struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = debugfs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warning("Could not create debugfs '%s' entry\n", name);

	return ret;
}

static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->options = debugfs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warning("Could not create debugfs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}

static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);
}

static struct trace_option_dentry *
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;

	if (!tracer)
		return NULL;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return NULL;

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return NULL;

	for (cnt = 0; opts[cnt].name; cnt++)
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);

	return topts;
}

static void
destroy_trace_option_files(struct trace_option_dentry *topts)
{
	int cnt;

	if (!topts)
		return;

	for (cnt = 0; topts[cnt].opt; cnt++) {
		if (topts[cnt].entry)
			debugfs_remove(topts[cnt].entry);
	}

	kfree(topts);
}

static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options, (void *)index,
				 &trace_options_core_fops);
}

static __init void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++)
		create_trace_option_core_file(tr, trace_options[i], i);
}

static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open = tracing_open_generic_tr,
	.read = rb_simple_read,
	.write = rb_simple_write,
	.release = tracing_release_generic_tr,
	.llseek = default_llseek,
};

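/*
 * rb_simple_fops backs the "tracing_on" file:
 *
 *	echo 0 > tracing_on	- stop recording into the ring buffer
 *	echo 1 > tracing_on	- resume recording
 *
 * Unlike switching tracers, this leaves all tracing machinery in place
 * and only gates writes to the buffer.
 */
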
struct dentry *trace_instance_dir;

static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);

static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}

static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		ring_buffer_free(tr->trace_buffer.buffer);
		free_percpu(tr->trace_buffer.data);
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}

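/*
 * When no snapshot was requested on the command line, max_buffer is
 * created with a token size of 1 byte, i.e. the smallest buffer the
 * ring buffer will allocate; it is resized to match the main buffer
 * the first time a snapshot is actually taken (see alloc_snapshot()).
 */
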
6103static int new_instance_create(const char *name)
6104{
Steven Rostedt277ba042012-08-03 16:10:49 -04006105 struct trace_array *tr;
6106 int ret;
Steven Rostedt277ba042012-08-03 16:10:49 -04006107
6108 mutex_lock(&trace_types_lock);
6109
6110 ret = -EEXIST;
6111 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6112 if (tr->name && strcmp(tr->name, name) == 0)
6113 goto out_unlock;
6114 }
6115
6116 ret = -ENOMEM;
6117 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
6118 if (!tr)
6119 goto out_unlock;
6120
6121 tr->name = kstrdup(name, GFP_KERNEL);
6122 if (!tr->name)
6123 goto out_free_tr;
6124
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07006125 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6126 goto out_free_tr;
6127
6128 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6129
Steven Rostedt277ba042012-08-03 16:10:49 -04006130 raw_spin_lock_init(&tr->start_lock);
6131
6132 tr->current_trace = &nop_trace;
6133
6134 INIT_LIST_HEAD(&tr->systems);
6135 INIT_LIST_HEAD(&tr->events);
6136
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006137 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
Steven Rostedt277ba042012-08-03 16:10:49 -04006138 goto out_free_tr;
6139
Steven Rostedt277ba042012-08-03 16:10:49 -04006140 tr->dir = debugfs_create_dir(name, trace_instance_dir);
6141 if (!tr->dir)
6142 goto out_free_tr;
6143
6144 ret = event_trace_add_tracer(tr->dir, tr);
Alexander Z Lam609e85a2013-07-10 17:34:34 -07006145 if (ret) {
6146 debugfs_remove_recursive(tr->dir);
Steven Rostedt277ba042012-08-03 16:10:49 -04006147 goto out_free_tr;
Alexander Z Lam609e85a2013-07-10 17:34:34 -07006148 }
Steven Rostedt277ba042012-08-03 16:10:49 -04006149
6150 init_tracer_debugfs(tr, tr->dir);
6151
6152 list_add(&tr->list, &ftrace_trace_arrays);
6153
6154 mutex_unlock(&trace_types_lock);
6155
6156 return 0;
6157
6158 out_free_tr:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006159 if (tr->trace_buffer.buffer)
6160 ring_buffer_free(tr->trace_buffer.buffer);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07006161 free_cpumask_var(tr->tracing_cpumask);
Steven Rostedt277ba042012-08-03 16:10:49 -04006162 kfree(tr->name);
6163 kfree(tr);
6164
6165 out_unlock:
6166 mutex_unlock(&trace_types_lock);
6167
6168 return ret;
6169
6170}
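
/*
 * For illustration: new_instance_create() and instance_delete() are
 * driven from userspace through the instances directory, e.g.:
 *
 *   # mkdir /sys/kernel/debug/tracing/instances/foo
 *   # echo 1 > /sys/kernel/debug/tracing/instances/foo/tracing_on
 *   # rmdir /sys/kernel/debug/tracing/instances/foo
 *
 * Each instance gets its own ring buffers plus the per-instance files
 * set up by init_tracer_debugfs() below.
 */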

static int instance_delete(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	ret = -EBUSY;
	if (tr->ref)
		goto out_unlock;

	list_del(&tr->list);

	tracing_set_nop(tr);
	event_trace_del_tracer(tr);
	ftrace_destroy_function_files(tr);
	debugfs_remove_recursive(tr->dir);
	free_percpu(tr->trace_buffer.data);
	ring_buffer_free(tr->trace_buffer.buffer);

	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static int instance_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/*
	 * The inode mutex is locked, but debugfs_create_dir() will also
	 * take the mutex. As the instances directory cannot be destroyed
	 * or changed in any other way, it is safe to unlock it and let
	 * the dentry try. If two users try to make the same dir at the
	 * same time, new_instance_create() will determine the winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = new_instance_create(dentry->d_iname);

	mutex_lock(&inode->i_mutex);

	return ret;
}

static int instance_rmdir(struct inode *inode, struct dentry *dentry)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/* The caller did a dget() on dentry */
	mutex_unlock(&dentry->d_inode->i_mutex);

	/*
	 * The inode mutex is locked, but debugfs_remove_recursive() will
	 * also take the mutex. As the instances directory cannot be
	 * destroyed or changed in any other way, it is safe to unlock it
	 * and let the dentry try. If two users try to remove the same dir
	 * at the same time, instance_delete() will determine the winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = instance_delete(dentry->d_iname);

	mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
	mutex_lock(&dentry->d_inode->i_mutex);

	return ret;
}

static const struct inode_operations instance_dir_inode_operations = {
	.lookup		= simple_lookup,
	.mkdir		= instance_mkdir,
	.rmdir		= instance_rmdir,
};

static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = debugfs_create_dir("instances", d_tracer);
	if (WARN_ON(!trace_instance_dir))
		return;

	/* Hijack the dir inode operations, to allow mkdir */
	trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
}

static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			  &tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_debugfs_percpu(tr, cpu);
}

static __init int tracer_init_debugfs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	init_tracer_debugfs(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			  &tracing_thresh, &tracing_max_lat_fops);

	trace_create_file("README", 0444, d_tracer,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			  NULL, &tracing_saved_cmdlines_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			  &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	create_trace_options_dir(&global_trace);

	return 0;
}
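
/*
 * For illustration: none of the files created above are visible until
 * debugfs is mounted, typically via
 *
 *   # mount -t debugfs nodev /sys/kernel/debug
 *
 * after which the tracing directory appears at
 * /sys/kernel/debug/tracing.
 */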

static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call	= trace_panic_handler,
	.next		= NULL,
	.priority	= 150	/* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call	= trace_die_handler,
	.priority	= 200
};

/*
 * The printk buffer is limited to 1024 bytes; we really don't need it
 * that big. Nothing should be printing 1000 characters at a time anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define KERN_TRACE here so there is a single place to change the
 * log level that the ftrace dump is printed at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->len >= TRACE_MAX_PRINT)
		s->len = TRACE_MAX_PRINT;

	/* Should be zero terminated, but we are paranoid. */
	s->buffer[s->len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}

void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
	}

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUs to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We read everything we can, and
	 * then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
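
/*
 * For illustration: ftrace_dump() fires automatically on a crash when
 * booted with the documented parameter
 *
 *   ftrace_dump_on_oops            (dump all CPU buffers, DUMP_ALL)
 *   ftrace_dump_on_oops=orig_cpu   (dump only the oopsing CPU, DUMP_ORIG)
 *
 * Since the symbol is exported, a module can also request a dump
 * directly; a minimal sketch:
 *
 *   ftrace_dump(DUMP_ALL);
 */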

__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* Used for event triggers */
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_free_cpumask;

	/* TODO: make the number of buffers hot pluggable with CPUs */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_temp_buffer;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	trace_init_cmdlines();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warning("Trace clock %s not defined, going back to default\n",
				   trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	ftrace_init_global_array_ops(&global_trace);

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	while (trace_boot_options) {
		char *option;

		option = strsep(&trace_boot_options, ",");
		trace_set_options(&global_trace, option);
	}

	register_snapshot_cmd();

	return 0;

out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_free_cpumask:
	free_percpu(global_trace.trace_buffer.data);
#ifdef CONFIG_TRACER_MAX_TRACE
	free_percpu(global_trace.max_buffer.data);
#endif
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
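
/*
 * For illustration: several of the boot-time knobs consumed above come
 * from documented kernel command line parameters, e.g.:
 *
 *   ftrace=function          (sets default_bootup_tracer)
 *   trace_options=sym-addr   (sets trace_boot_options, parsed above)
 *   trace_clock=global       (sets trace_boot_clock)
 */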

__init static int clear_boot_tracer(void)
{
	/*
	 * The default bootup tracer name lives in an init section and
	 * will be freed after boot. This function is called at
	 * late_initcall time; if the boot tracer was never registered,
	 * clear the pointer out to prevent a later registration from
	 * accessing memory that is about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);