/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will peek into the ring buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring buffer, such as trace_printk(),
 * could occur at the same time, giving false positive or
 * negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurs.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will be set to zero if the
 * initialization of the tracer is successful. That is the
 * only place that sets it back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered the oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

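/*
 * Illustrative note (not in the original source): the parser above
 * accepts two boot-argument forms. Passing "ftrace_dump_on_oops"
 * alone selects DUMP_ALL (dump every CPU's buffer), while
 * "ftrace_dump_on_oops=orig_cpu" selects DUMP_ORIG (only the buffer
 * of the CPU that triggered the oops). Any other "=value" is
 * rejected and the option is ignored.
 */
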
static int __init stop_trace_on_warning(char *str)
{
	__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning=", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);


unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

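/*
 * Illustrative note (not in the original source): the "+ 500" above
 * rounds to the nearest microsecond rather than truncating, e.g.
 * ns2usecs(1499) == 1 but ns2usecs(1500) == 2. do_div() is used
 * because native 64-bit division is not available on all
 * architectures.
 */
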
/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list, by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array global_trace;

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}

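/*
 * Illustrative sketch (not in the original source): these reference
 * helpers are used in matched pairs around any use of a trace
 * instance, so the array cannot go away while a file or consumer
 * still holds it:
 *
 *	if (trace_array_get(tr) < 0)
 *		return -ENODEV;		// instance already removed
 *	// ... use tr ...
 *	trace_array_put(tr);
 *
 * trace_array_get() walks ftrace_trace_arrays under trace_types_lock,
 * so a stale pointer to a removed instance is never re-referenced.
 */
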
int filter_check_discard(struct ftrace_event_file *file, void *rec,
			 struct ring_buffer *buffer,
			 struct ring_buffer_event *event)
{
	if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(file->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(filter_check_discard);

int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(call_filter_check_discard);

static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot-time and run-time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer *trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * The ring buffer serializes readers, but that is only low level
 * protection. The validity of the events (returned by
 * ring_buffer_peek() etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other
 * processes to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) the page of the consumed events may become a page for
 *      splice_read, and this page will be returned to the system.
 *
 * These primitives allow multi-process access to different cpu ring
 * buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

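/*
 * Illustrative sketch (not in the original source): a reader path
 * typically brackets its buffer access like this, passing
 * RING_BUFFER_ALL_CPUS when it consumes from every CPU at once:
 *
 *	trace_access_lock(cpu);
 *	event = ring_buffer_consume(buffer, cpu, &ts, &lost_events);
 *	// ... decode the event ...
 *	trace_access_unlock(cpu);
 *
 * Per-cpu readers take the rwsem shared plus their own per-cpu
 * mutex, so they can run in parallel; an all-cpu reader takes the
 * rwsem exclusive and blocks them all.
 */
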
/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;

static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff) that just want to
	 * know if the ring buffer has been disabled, but can handle
	 * races where it gets disabled while we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	  The address of the caller
 * @str:  The constant string to write
 * @size: The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, preempt_count());
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

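/*
 * Illustrative note (not in the original source): callers normally
 * reach __trace_puts()/__trace_bputs() through the trace_puts()
 * macro from <linux/kernel.h>, which picks __trace_bputs() when the
 * argument is a compile-time constant (so only the string's address
 * is recorded) and falls back to copying the bytes here otherwise:
 *
 *	trace_puts("reached the slow path\n");
 */
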
/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	 The address of the caller
 * @str: The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, preempt_count());
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot either with
 * tracing_snapshot_alloc(), or manually with:
 *   echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing,
 * basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here! ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

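/*
 * Illustrative sketch (not in the original source): a typical use is
 * to freeze the interesting trace data the moment a rare condition
 * is seen, while normal tracing keeps running:
 *
 *	if (WARN_ON_ONCE(seq == bad_seq))	// hypothetical condition
 *		tracing_snapshot();
 *
 * with the snapshot buffer allocated beforehand, e.g. by booting
 * with "alloc_snapshot" (parsed above) or by writing 1 to the
 * tracing/snapshot file.
 */
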
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				&tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it, because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff) that just want to
	 * know if the ring buffer has been disabled, but can handle
	 * races where it gets disabled while we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr: the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

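/*
 * Illustrative note (not in the original source): memparse() accepts
 * the usual size suffixes, so booting with "trace_buf_size=1M" sets
 * a per-cpu buffer size of 1048576 bytes; a bare "trace_buf_size=4096"
 * is taken as bytes and later rounded up to page size.
 */
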
static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

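/*
 * Illustrative note (not in the original source): the boot value is
 * given in microseconds but stored in nanoseconds, so
 * "tracing_thresh=100" makes the latency tracers report only
 * latencies longer than 100 us (tracing_thresh == 100000).
 */
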
unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	"record-cmd",
	"overwrite",
	"disable_on_free",
	"irq-info",
	"markers",
	"function-trace",
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;	/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,	"local",	1 },
	{ trace_clock_global,	"global",	1 },
	{ trace_clock_counter,	"counter",	0 },
	{ trace_clock_jiffies,	"uptime",	1 },
	{ trace_clock,		"perf",		1 },
	ARCH_TRACE_CLOCKS
};

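/*
 * Illustrative note (not in the original source): the active clock is
 * selected at run time through the trace_clock file, e.g.
 *
 *	# echo global > /sys/kernel/debug/tracing/trace_clock
 *
 * "local" is fastest but not guaranteed monotonic across CPUs, while
 * "global" pays a synchronization cost to be cross-CPU ordered; the
 * in_ns flag above tells the output code whether timestamps can be
 * rendered in nanosecond-based formats.
 */
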
/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}

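/*
 * Illustrative sketch (not in the original source): a write handler
 * for a tracing control file (e.g. the set_ftrace_filter writers in
 * ftrace.c) drives the parser roughly like this:
 *
 *	struct trace_parser parser;
 *
 *	if (trace_parser_get_init(&parser, size + 1))
 *		return -ENOMEM;
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser) &&
 *	    !trace_parser_cont(&parser))
 *		process_token(parser.buffer);	// hypothetical helper
 *	trace_parser_put(&parser);
 *
 * trace_get_user() returns one space-separated token per call and
 * uses parser.cont to resume a token that was split across writes.
 */
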
ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
	int len;
	int ret;

	if (!cnt)
		return 0;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
	if (ret == cnt)
		return -EFAULT;

	cnt -= ret;

	s->readpos += cnt;
	return cnt;
}

static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->readpos, cnt);

	s->readpos += cnt;
	return cnt;
}

unsigned long __read_mostly tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}

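/*
 * Illustrative note (not in the original source): the "snapshot" here
 * is a pointer swap, not a copy. The live buffer becomes the max
 * buffer and vice versa, which is why the max buffer must already be
 * allocated (allocated_snapshot) and why the swap is done under
 * max_lock with interrupts disabled.
 */
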
/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static void wait_on_pipe(struct trace_iterator *iter)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return;

	ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break them. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}

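/*
 * Illustrative sketch (not in the original source): a minimal tracer
 * only needs a name plus init/reset callbacks before calling
 * register_tracer(), typically from an initcall:
 *
 *	static struct tracer example_tracer __read_mostly = {
 *		.name	= "example",	// hypothetical tracer
 *		.init	= example_init,	// hypothetical callbacks
 *		.reset	= example_reset,
 *	};
 *
 *	static __init int init_example_tracer(void)
 *	{
 *		return register_tracer(&example_tracer);
 *	}
 *	device_initcall(init_example_tracer);
 *
 * Unset .set_flag/.flags members are filled in with the dummy
 * versions above, and registration fails if the name collides or
 * the optional selftest does not pass.
 */
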
void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}

#define SAVED_CMDLINES 128
#define NO_CMDLINE_MAP UINT_MAX
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;

/* temporarily disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;

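/*
 * Illustrative note (not in the original source): this is a small
 * fixed cache of task comms. map_pid_to_cmdline[] maps a pid to one
 * of the SAVED_CMDLINES slots (or NO_CMDLINE_MAP), saved_cmdlines[]
 * holds the comm strings, and map_cmdline_to_pid[] lets a slot be
 * reclaimed from its previous pid when the 128 entries wrap around.
 */
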
static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}

int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

	ftrace_start();
 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_start_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Lightweight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	ftrace_stop();
	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (global_trace.stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_stop_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	/* If global, we need to also stop the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_stop();

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	if (tr->stop_count++)
		goto out;

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

void trace_stop_cmdline_recording(void);

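/*
 * trace_save_cmdline - remember the comm of a task for later output
 *
 * Records tsk->comm in a small fixed-size table indexed through the
 * pid-to-cmdline map, so that trace output can print a task name for
 * a pid without holding a reference to the task.
 */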
static void trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!arch_spin_trylock(&trace_cmdline_lock))
		return;

	idx = map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		idx = (cmdline_idx + 1) % SAVED_CMDLINES;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		map_cmdline_to_pid[idx] = tsk->pid;
		map_pid_to_cmdline[tsk->pid] = idx;

		cmdline_idx = idx;
	}

	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

	arch_spin_unlock(&trace_cmdline_lock);
}

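/*
 * trace_find_cmdline - look up the saved comm for a pid
 *
 * Copies the saved task name into @comm, or a placeholder ("<idle>",
 * "<...>" or "<XXX>") when the pid is idle, unmapped, out of range,
 * or invalid.
 */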
void trace_find_cmdline(int pid, char comm[])
{
	unsigned map;

	if (!pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (WARN_ON_ONCE(pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	if (pid > PID_MAX_DEFAULT) {
		strcpy(comm, "<...>");
		return;
	}

	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);
	map = map_pid_to_cmdline[pid];
	if (map != NO_CMDLINE_MAP)
		strcpy(comm, saved_cmdlines[map]);
	else
		strcpy(comm, "<...>");

	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

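/*
 * tracing_record_cmdline - save the comm of @tsk if recording is active
 *
 * Only saves once per commit window: the per-cpu trace_cmdline_save
 * flag is cleared here and set again on the next buffer commit.
 */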
void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
		return;

	if (!__this_cpu_read(trace_cmdline_save))
		return;

	__this_cpu_write(trace_cmdline_save, false);

	trace_save_cmdline(tsk);
}

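/*
 * tracing_generic_entry_update - fill in the common fields of an entry
 *
 * Sets the pid, the preemption count, and the irq/softirq/hardirq and
 * need-resched flag bits that every trace entry carries.
 */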
void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count = pc & 0xff;
	entry->pid = (tsk) ? tsk->pid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);

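/*
 * trace_buffer_lock_reserve - reserve space for an event in the buffer
 *
 * Reserves @len bytes in @buffer and, on success, initializes the
 * common entry fields. Returns the event, or NULL if the buffer is
 * full or recording is off; a successful reserve is expected to be
 * followed by a commit or a discard.
 */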
struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL) {
		struct trace_entry *ent = ring_buffer_event_data(event);

		tracing_generic_entry_update(ent, flags, pc);
		ent->type = type;
	}

	return event;
}

void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_cmdline_save, true);
	ring_buffer_unlock_commit(buffer, event);
}

static inline void
__trace_buffer_unlock_commit(struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
			     unsigned long flags, int pc)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack(buffer, flags, 6, pc);
	ftrace_trace_userstack(buffer, flags, pc);
}

void trace_buffer_unlock_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);

static struct ring_buffer *temp_buffer;

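/*
 * trace_event_buffer_lock_reserve - reserve an event for a trace event file
 *
 * Like trace_buffer_lock_reserve(), but takes the buffer from the
 * event file's trace array, and falls back to temp_buffer when the
 * main buffer is off but triggers still need to see the event data.
 */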
struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
				struct ftrace_event_file *ftrace_file,
				int type, unsigned long len,
				unsigned long flags, int pc)
{
	struct ring_buffer_event *entry;

	*current_rb = ftrace_file->tr->trace_buffer.buffer;
	entry = trace_buffer_lock_reserve(*current_rb,
					  type, len, flags, pc);
	/*
	 * If tracing is off, but we have triggers enabled
	 * we still need to look at the event data. Use the temp_buffer
	 * to store the trace event for the trigger to use. It's recursion
	 * safe and will not be recorded anywhere.
	 */
	if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
		*current_rb = temp_buffer;
		entry = trace_buffer_lock_reserve(*current_rb,
						  type, len, flags, pc);
	}
	return entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);

struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
				  int type, unsigned long len,
				  unsigned long flags, int pc)
{
	*current_rb = global_trace.trace_buffer.buffer;
	return trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);

void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
					struct ring_buffer_event *event,
					unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);

void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);

void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event)
{
	ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);

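/*
 * trace_function - record a function entry in the trace buffer
 *
 * Writes a TRACE_FN entry with the instruction pointer and its
 * caller, unless this CPU is currently reading the ring buffer.
 */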
void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct ftrace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	/* If we are reading the ring buffer, don't trace */
	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}

#ifdef CONFIG_STACKTRACE

#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
struct ftrace_stack {
	unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
};

static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);

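/*
 * __ftrace_trace_stack - record a kernel stack trace in the buffer
 *
 * Uses the per-cpu ftrace_stack when it is free, so a deep trace can
 * be captured without a large on-stack array; nested users (e.g. an
 * NMI arriving mid-capture) fall back to the smaller array embedded
 * in the stack entry itself.
 */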
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs)
{
	struct ftrace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	int use_stack;
	int size = FTRACE_STACK_ENTRIES;

	trace.nr_entries = 0;
	trace.skip = skip;

	/*
	 * Since events can happen in NMIs there's no safe way to
	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
	 * or NMI comes in, it will just have to use the default
	 * FTRACE_STACK_SIZE.
	 */
	preempt_disable_notrace();

	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
	/*
	 * We don't need any atomic variables, just a barrier.
	 * If an interrupt comes in, we don't care, because it would
	 * have exited and put the counter back to what we want.
	 * We just need a barrier to keep gcc from moving things
	 * around.
	 */
	barrier();
	if (use_stack == 1) {
		trace.entries = this_cpu_ptr(ftrace_stack.calls);
		trace.max_entries = FTRACE_STACK_MAX_ENTRIES;

		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);

		if (trace.nr_entries > size)
			size = trace.nr_entries;
	} else
		/* From now on, use_stack is a boolean */
		use_stack = 0;

	size *= sizeof(unsigned long);

	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
					  sizeof(*entry) + size, flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);

	memset(&entry->caller, 0, size);

	if (use_stack)
		memcpy(&entry->caller, trace.entries,
		       trace.nr_entries * sizeof(unsigned long));
	else {
		trace.max_entries = FTRACE_STACK_ENTRIES;
		trace.entries = entry->caller;
		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);
	}

	entry->size = trace.nr_entries;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out:
	/* Again, don't let gcc optimize things here */
	barrier();
	__this_cpu_dec(ftrace_stack_reserve);
	preempt_enable_notrace();

}

void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
			     int skip, int pc, struct pt_regs *regs)
{
	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, regs);
}

void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
			int skip, int pc)
{
	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
}

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc)
{
	__ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
}

/**
 * trace_dump_stack - record a stack back trace in the trace buffer
 * @skip: Number of functions to skip (helper handlers)
 */
void trace_dump_stack(int skip)
{
	unsigned long flags;

	if (tracing_disabled || tracing_selftest_running)
		return;

	local_save_flags(flags);

	/*
	 * Skip 3 more; that seems to get us to the caller of
	 * this function.
	 */
	skip += 3;
	__ftrace_trace_stack(global_trace.trace_buffer.buffer,
			     flags, skip, preempt_count(), NULL);
}

static DEFINE_PER_CPU(int, user_stack_count);

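/*
 * ftrace_trace_userstack - record the current user-space stack
 *
 * Skipped in NMI context, since saving a user stack can fault, and
 * guarded by a per-cpu count so user stack tracing cannot recurse
 * through the kernel events it may itself trigger.
 */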
void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;

	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	/*
	 * NMIs can not handle page faults, even with fix ups.
	 * Saving the user stack can (and often does) fault.
	 */
	if (unlikely(in_nmi()))
		return;

	/*
	 * prevent recursion, since the user stack tracing may
	 * trigger other kernel events.
	 */
	preempt_disable();
	if (__this_cpu_read(user_stack_count))
		goto out;

	__this_cpu_inc(user_stack_count);

	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					  sizeof(*entry), flags, pc);
	if (!event)
		goto out_drop_count;
	entry = ring_buffer_event_data(event);

	entry->tgid = current->tgid;
	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries = 0;
	trace.max_entries = FTRACE_STACK_ENTRIES;
	trace.skip = 0;
	trace.entries = entry->caller;

	save_stack_trace_user(&trace);
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out_drop_count:
	__this_cpu_dec(user_stack_count);
 out:
	preempt_enable();
}

#ifdef UNUSED
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
	ftrace_trace_userstack(tr, flags, preempt_count());
}
#endif /* UNUSED */

#endif /* CONFIG_STACKTRACE */

/* created for use with alloc_percpu */
struct trace_buffer_struct {
	char buffer[TRACE_BUF_SIZE];
};

static struct trace_buffer_struct *trace_percpu_buffer;
static struct trace_buffer_struct *trace_percpu_sirq_buffer;
static struct trace_buffer_struct *trace_percpu_irq_buffer;
static struct trace_buffer_struct *trace_percpu_nmi_buffer;

/*
 * The buffer used is dependent on the context. There is a per cpu
 * buffer for normal context, softirq context, hard irq context and
 * for NMI context. This allows for lockless recording.
 *
 * Note, if the buffers failed to be allocated, then this returns NULL
 */
static char *get_trace_buf(void)
{
	struct trace_buffer_struct *percpu_buffer;

	/*
	 * If we have allocated per cpu buffers, then we do not
	 * need to do any locking.
	 */
	if (in_nmi())
		percpu_buffer = trace_percpu_nmi_buffer;
	else if (in_irq())
		percpu_buffer = trace_percpu_irq_buffer;
	else if (in_softirq())
		percpu_buffer = trace_percpu_sirq_buffer;
	else
		percpu_buffer = trace_percpu_buffer;

	if (!percpu_buffer)
		return NULL;

	return this_cpu_ptr(&percpu_buffer->buffer[0]);
}

static int alloc_percpu_trace_buffer(void)
{
	struct trace_buffer_struct *buffers;
	struct trace_buffer_struct *sirq_buffers;
	struct trace_buffer_struct *irq_buffers;
	struct trace_buffer_struct *nmi_buffers;

	buffers = alloc_percpu(struct trace_buffer_struct);
	if (!buffers)
		goto err_warn;

	sirq_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!sirq_buffers)
		goto err_sirq;

	irq_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!irq_buffers)
		goto err_irq;

	nmi_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!nmi_buffers)
		goto err_nmi;

	trace_percpu_buffer = buffers;
	trace_percpu_sirq_buffer = sirq_buffers;
	trace_percpu_irq_buffer = irq_buffers;
	trace_percpu_nmi_buffer = nmi_buffers;

	return 0;

 err_nmi:
	free_percpu(irq_buffers);
 err_irq:
	free_percpu(sirq_buffers);
 err_sirq:
	free_percpu(buffers);
 err_warn:
	WARN(1, "Could not allocate percpu trace_printk buffer");
	return -ENOMEM;
}

static int buffers_allocated;

void trace_printk_init_buffers(void)
{
	if (buffers_allocated)
		return;

	if (alloc_percpu_trace_buffer())
		return;

	/* trace_printk() is for debug use only. Don't use it in production. */

	pr_warning("\n**********************************************************\n");
	pr_warning("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warning("**                                                      **\n");
	pr_warning("** trace_printk() being used. Allocating extra memory.  **\n");
	pr_warning("**                                                      **\n");
	pr_warning("** This means that this is a DEBUG kernel and it is     **\n");
	pr_warning("** unsafe for production use.                           **\n");
	pr_warning("**                                                      **\n");
	pr_warning("** If you see this message and you are not debugging    **\n");
	pr_warning("** the kernel, report this immediately to your vendor!  **\n");
	pr_warning("**                                                      **\n");
	pr_warning("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warning("**********************************************************\n");

	/* Expand the buffers to set size */
	tracing_update_buffers();

	buffers_allocated = 1;

	/*
	 * trace_printk_init_buffers() can be called by modules.
	 * If that happens, then we need to start cmdline recording
	 * directly here. If the global_trace.buffer is already
	 * allocated here, then this was called by module code.
	 */
	if (global_trace.trace_buffer.buffer)
		tracing_start_cmdline_record();
}

void trace_printk_start_comm(void)
{
	/* Start tracing comms if trace printk is set */
	if (!buffers_allocated)
		return;
	tracing_start_cmdline_record();
}

static void trace_printk_start_stop_comm(int enabled)
{
	if (!buffers_allocated)
		return;

	if (enabled)
		tracing_start_cmdline_record();
	else
		tracing_stop_cmdline_record();
}

/**
 * trace_vbprintk - write a binary message to the tracing buffer
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	struct ftrace_event_call *call = &event_bprint;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_array *tr = &global_trace;
	struct bprint_entry *entry;
	unsigned long flags;
	char *tbuffer;
	int len = 0, size, pc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out;
	}

	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);

	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
		goto out;

	local_save_flags(flags);
	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->fmt = fmt;

	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(buffer, flags, 6, pc);
	}

out:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);

static int
__trace_array_vprintk(struct ring_buffer *buffer,
		      unsigned long ip, const char *fmt, va_list args)
{
	struct ftrace_event_call *call = &event_print;
	struct ring_buffer_event *event;
	int len = 0, size, pc;
	struct print_entry *entry;
	unsigned long flags;
	char *tbuffer;

	if (tracing_disabled || tracing_selftest_running)
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out;
	}

	len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
	if (len > TRACE_BUF_SIZE)
		goto out;

	local_save_flags(flags);
	size = sizeof(*entry) + len + 1;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, tbuffer, len);
	entry->buf[len] = '\0';
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(buffer, flags, 6, pc);
	}
 out:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}

int trace_array_vprintk(struct trace_array *tr,
			unsigned long ip, const char *fmt, va_list args)
{
	return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
}

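/*
 * trace_array_printk - printf-style write into a trace_array's buffer
 *
 * Same as trace_printk() but targets the given trace instance instead
 * of the global buffer. Gated on the "printk" trace option
 * (TRACE_ITER_PRINTK).
 */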
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = trace_array_vprintk(tr, ip, fmt, ap);
	va_end(ap);
	return ret;
}

int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = __trace_array_vprintk(buffer, ip, fmt, ap);
	va_end(ap);
	return ret;
}

int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
	return trace_array_vprintk(&global_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);

static void trace_iterator_increment(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);

	iter->idx++;
	if (buf_iter)
		ring_buffer_read(buf_iter, NULL);
}

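/*
 * peek_next_entry - peek at the next entry on a cpu without consuming it
 *
 * Reads through the buffer iterator when one exists, otherwise peeks
 * directly at the ring buffer. Also records the entry's size and any
 * events lost on that cpu.
 */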
static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
		unsigned long *lost_events)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);

	if (buf_iter)
		event = ring_buffer_iter_peek(buf_iter, ts);
	else
		event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
					 lost_events);

	if (event) {
		iter->ent_size = ring_buffer_event_length(event);
		return ring_buffer_event_data(event);
	}
	iter->ent_size = 0;
	return NULL;
}

static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
		  unsigned long *missing_events, u64 *ent_ts)
{
	struct ring_buffer *buffer = iter->trace_buffer->buffer;
	struct trace_entry *ent, *next = NULL;
	unsigned long lost_events = 0, next_lost = 0;
	int cpu_file = iter->cpu_file;
	u64 next_ts = 0, ts;
	int next_cpu = -1;
	int next_size = 0;
	int cpu;

	/*
	 * If we are in a per_cpu trace file, don't bother iterating
	 * over all cpus; peek directly.
	 */
	if (cpu_file > RING_BUFFER_ALL_CPUS) {
		if (ring_buffer_empty_cpu(buffer, cpu_file))
			return NULL;
		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
		if (ent_cpu)
			*ent_cpu = cpu_file;

		return ent;
	}

	for_each_tracing_cpu(cpu) {

		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;

		ent = peek_next_entry(iter, cpu, &ts, &lost_events);

		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ts < next_ts)) {
			next = ent;
			next_cpu = cpu;
			next_ts = ts;
			next_lost = lost_events;
			next_size = iter->ent_size;
		}
	}

	iter->ent_size = next_size;

	if (ent_cpu)
		*ent_cpu = next_cpu;

	if (ent_ts)
		*ent_ts = next_ts;

	if (missing_events)
		*missing_events = next_lost;

	return next;
}

/* Find the next real entry, without updating the iterator itself */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts)
{
	return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
}

/* Find the next real entry, and increment the iterator to the next entry */
void *trace_find_next_entry_inc(struct trace_iterator *iter)
{
	iter->ent = __find_next_entry(iter, &iter->cpu,
				      &iter->lost_events, &iter->ts);

	if (iter->ent)
		trace_iterator_increment(iter);

	return iter->ent ? iter : NULL;
}

static void trace_consume(struct trace_iterator *iter)
{
	ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
			    &iter->lost_events);
}

static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	int i = (int)*pos;
	void *ent;

	WARN_ON_ONCE(iter->leftover);

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = trace_find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = trace_find_next_entry_inc(iter);

	iter->pos = *pos;

	return ent;
}

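/*
 * tracing_iter_reset - rewind the iterator for one cpu
 *
 * Resets the cpu's buffer iterator and skips past any entries whose
 * timestamps predate the buffer's time_start, counting them so the
 * entry statistics stay correct for the max latency tracers.
 */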
void tracing_iter_reset(struct trace_iterator *iter, int cpu)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter;
	unsigned long entries = 0;
	u64 ts;

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;

	buf_iter = trace_buffer_iter(iter, cpu);
	if (!buf_iter)
		return;

	ring_buffer_iter_reset(buf_iter);

	/*
	 * We could have the case with the max latency tracers
	 * that a reset never took place on a cpu. This is evident
	 * by the timestamp being before the start of the buffer.
	 */
	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
		if (ts >= iter->trace_buffer->time_start)
			break;
		entries++;
		ring_buffer_read(buf_iter, NULL);
	}

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
}

/*
 * The current tracer is copied to avoid global locking
 * all around.
 */
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	int cpu_file = iter->cpu_file;
	void *p = NULL;
	loff_t l = 0;
	int cpu;

	/*
	 * copy the tracer to avoid using a global lock all around.
	 * iter->trace is a copy of current_trace, the pointer to the
	 * name may be used instead of a strcmp(), as iter->trace->name
	 * will point to the same string as current_trace->name.
	 */
	mutex_lock(&trace_types_lock);
	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return ERR_PTR(-EBUSY);
#endif

	if (!iter->snapshot)
		atomic_inc(&trace_record_cmdline_disabled);

	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		if (cpu_file == RING_BUFFER_ALL_CPUS) {
			for_each_tracing_cpu(cpu)
				tracing_iter_reset(iter, cpu);
		} else
			tracing_iter_reset(iter, cpu_file);

		iter->leftover = 0;
		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		/*
		 * If we overflowed the seq_file before, then we want
		 * to just reuse the trace_seq buffer again.
		 */
		if (iter->leftover)
			p = iter;
		else {
			l = *pos - 1;
			p = s_next(m, p, &l);
		}
	}

	trace_event_read_lock();
	trace_access_lock(cpu_file);
	return p;
}

static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return;
#endif

	if (!iter->snapshot)
		atomic_dec(&trace_record_cmdline_disabled);

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
}

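/*
 * get_total_entries - count entries across all cpus of a trace_buffer
 *
 * @entries counts what is still readable in the buffer; @total also
 * includes events lost to overruns. Entries skipped because they
 * predate time_start are excluded from both counts.
 */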
static void
get_total_entries(struct trace_buffer *buf,
		  unsigned long *total, unsigned long *entries)
{
	unsigned long count;
	int cpu;

	*total = 0;
	*entries = 0;

	for_each_tracing_cpu(cpu) {
		count = ring_buffer_entries_cpu(buf->buffer, cpu);
		/*
		 * If this buffer has skipped entries, then we hold all
		 * entries for the trace and we need to ignore the
		 * ones before the time stamp.
		 */
		if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
			count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
			/* total is the same as the entries */
			*total += count;
		} else
			*total += count +
				ring_buffer_overrun_cpu(buf->buffer, cpu);
		*entries += count;
	}
}

static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                  _------=> CPU#            \n");
	seq_puts(m, "#                 / _-----=> irqs-off        \n");
	seq_puts(m, "#                | / _----=> need-resched    \n");
	seq_puts(m, "#                || / _---=> hardirq/softirq \n");
	seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
	seq_puts(m, "#                |||| /     delay            \n");
	seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
	seq_puts(m, "#     \\   /      |||||  \\    |   /           \n");
}

static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
{
	unsigned long total;
	unsigned long entries;

	get_total_entries(buf, &total, &entries);
	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
		   entries, total, num_online_cpus());
	seq_puts(m, "#\n");
}

static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |       |          |         |\n");
}

static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#                              _-----=> irqs-off\n");
	seq_puts(m, "#                             / _----=> need-resched\n");
	seq_puts(m, "#                            | / _---=> hardirq/softirq\n");
	seq_puts(m, "#                            || / _--=> preempt-depth\n");
	seq_puts(m, "#                            ||| /     delay\n");
	seq_puts(m, "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |       |   ||||       |         |\n");
}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002505
Jiri Olsa62b915f2010-04-02 19:01:22 +02002506void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002507print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2508{
2509 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002510 struct trace_buffer *buf = iter->trace_buffer;
2511 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002512 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002513 unsigned long entries;
2514 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002515	const char *name;
2516
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05002517 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002518
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002519 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002520
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002521 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002522 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002523 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002524 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002525 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002526 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02002527 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002528 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02002529 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002530 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002531#if defined(CONFIG_PREEMPT_NONE)
2532 "server",
2533#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2534 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04002535#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002536 "preempt",
2537#else
2538 "unknown",
2539#endif
2540 /* These are reserved for later use */
2541 0, 0, 0, 0);
2542#ifdef CONFIG_SMP
2543 seq_printf(m, " #P:%d)\n", num_online_cpus());
2544#else
2545 seq_puts(m, ")\n");
2546#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002547 seq_puts(m, "# -----------------\n");
2548 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002549 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07002550 data->comm, data->pid,
2551 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002552 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002553 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002554
2555 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002556 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002557 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2558 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002559 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002560 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2561 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04002562 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002563 }
2564
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002565 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002566}
2567
Steven Rostedta3097202008-11-07 22:36:02 -05002568static void test_cpu_buff_start(struct trace_iterator *iter)
2569{
2570 struct trace_seq *s = &iter->seq;
2571
Steven Rostedt12ef7d42008-11-12 17:52:38 -05002572 if (!(trace_flags & TRACE_ITER_ANNOTATE))
2573 return;
2574
2575 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2576 return;
2577
Rusty Russell44623442009-01-01 10:12:23 +10302578 if (cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05002579 return;
2580
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002581 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002582 return;
2583
Rusty Russell44623442009-01-01 10:12:23 +10302584 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02002585
2586 /* Don't print started cpu buffer for the first entry of the trace */
2587 if (iter->idx > 1)
2588 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2589 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05002590}
2591
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002592static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002593{
Steven Rostedt214023c2008-05-12 21:20:46 +02002594 struct trace_seq *s = &iter->seq;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002595 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002596 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002597 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002598
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002599 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002600
Steven Rostedta3097202008-11-07 22:36:02 -05002601 test_cpu_buff_start(iter);
2602
Steven Rostedtf633cef2008-12-23 23:24:13 -05002603 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002604
2605 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt27d48be2009-03-04 21:57:29 -05002606 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2607 if (!trace_print_lat_context(iter))
2608 goto partial;
2609 } else {
2610 if (!trace_print_context(iter))
2611 goto partial;
2612 }
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002613 }
2614
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002615 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04002616 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002617
2618 if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
2619 goto partial;
Steven Rostedt7104f302008-10-01 10:52:51 -04002620
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002621 return TRACE_TYPE_HANDLED;
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002622partial:
2623 return TRACE_TYPE_PARTIAL_LINE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002624}
2625
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002626static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002627{
2628 struct trace_seq *s = &iter->seq;
2629 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002630 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002631
2632 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002633
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002634 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002635 if (!trace_seq_printf(s, "%d %d %llu ",
2636 entry->pid, iter->cpu, iter->ts))
2637 goto partial;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002638 }
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002639
Steven Rostedtf633cef2008-12-23 23:24:13 -05002640 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002641 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04002642 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002643
2644 if (!trace_seq_printf(s, "%d ?\n", entry->type))
2645 goto partial;
Steven Rostedt7104f302008-10-01 10:52:51 -04002646
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002647 return TRACE_TYPE_HANDLED;
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002648partial:
2649 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002650}
2651
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002652static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002653{
2654 struct trace_seq *s = &iter->seq;
2655 unsigned char newline = '\n';
2656 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002657 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002658
2659 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002660
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002661 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2662 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
2663 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
2664 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
2665 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002666
Steven Rostedtf633cef2008-12-23 23:24:13 -05002667 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002668 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04002669 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002670 if (ret != TRACE_TYPE_HANDLED)
2671 return ret;
2672 }
Steven Rostedt7104f302008-10-01 10:52:51 -04002673
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002674 SEQ_PUT_FIELD_RET(s, newline);
2675
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002676 return TRACE_TYPE_HANDLED;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002677}
2678
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002679static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002680{
2681 struct trace_seq *s = &iter->seq;
2682 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002683 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002684
2685 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002686
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002687 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2688 SEQ_PUT_FIELD_RET(s, entry->pid);
Steven Rostedt1830b52d2009-02-07 19:38:43 -05002689 SEQ_PUT_FIELD_RET(s, iter->cpu);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002690 SEQ_PUT_FIELD_RET(s, iter->ts);
2691 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002692
Steven Rostedtf633cef2008-12-23 23:24:13 -05002693 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04002694 return event ? event->funcs->binary(iter, 0, event) :
2695 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002696}
2697
Jiri Olsa62b915f2010-04-02 19:01:22 +02002698int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002699{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002700 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002701 int cpu;
2702
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002703 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002704 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002705 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002706 buf_iter = trace_buffer_iter(iter, cpu);
2707 if (buf_iter) {
2708 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002709 return 0;
2710 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002711 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002712 return 0;
2713 }
2714 return 1;
2715 }
2716
Steven Rostedtab464282008-05-12 21:21:00 +02002717 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04002718 buf_iter = trace_buffer_iter(iter, cpu);
2719 if (buf_iter) {
2720 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04002721 return 0;
2722 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002723 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04002724 return 0;
2725 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002726 }
Steven Rostedtd7690412008-10-01 00:29:53 -04002727
Frederic Weisbecker797d3712008-09-30 18:13:45 +02002728 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002729}
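
/*
 * Returns 1 when the buffers relevant to this iterator (a single CPU's,
 * or all of them) have been fully consumed; the header helpers below
 * use this to print nothing for an empty trace.
 */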
2730
Lai Jiangshan4f535962009-05-18 19:35:34 +08002731/* Called with trace_event_read_lock() held. */
Jason Wessel955b61e2010-08-05 09:22:23 -05002732enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002733{
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002734 enum print_line_t ret;
2735
Jiri Olsaee5e51f2011-03-25 12:05:18 +01002736 if (iter->lost_events &&
2737 !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2738 iter->cpu, iter->lost_events))
2739 return TRACE_TYPE_PARTIAL_LINE;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002740
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002741 if (iter->trace && iter->trace->print_line) {
2742 ret = iter->trace->print_line(iter);
2743 if (ret != TRACE_TYPE_UNHANDLED)
2744 return ret;
2745 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02002746
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05002747 if (iter->ent->type == TRACE_BPUTS &&
2748 trace_flags & TRACE_ITER_PRINTK &&
2749 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2750 return trace_print_bputs_msg_only(iter);
2751
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002752 if (iter->ent->type == TRACE_BPRINT &&
2753 trace_flags & TRACE_ITER_PRINTK &&
2754 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04002755 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002756
Frederic Weisbecker66896a82008-12-13 20:18:13 +01002757 if (iter->ent->type == TRACE_PRINT &&
2758 trace_flags & TRACE_ITER_PRINTK &&
2759 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04002760 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01002761
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002762 if (trace_flags & TRACE_ITER_BIN)
2763 return print_bin_fmt(iter);
2764
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002765 if (trace_flags & TRACE_ITER_HEX)
2766 return print_hex_fmt(iter);
2767
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002768 if (trace_flags & TRACE_ITER_RAW)
2769 return print_raw_fmt(iter);
2770
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002771 return print_trace_fmt(iter);
2772}
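
/*
 * Summary of the dispatch above: lost-event annotations are printed
 * first, then a tracer-specific ->print_line() hook gets a chance,
 * then the printk-msg-only short circuits (TRACE_BPUTS, TRACE_BPRINT,
 * TRACE_PRINT), and finally the bin/hex/raw formatters run before
 * falling back to the default print_trace_fmt().
 */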
2773
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01002774void trace_latency_header(struct seq_file *m)
2775{
2776 struct trace_iterator *iter = m->private;
2777
2778 /* print nothing if the buffers are empty */
2779 if (trace_empty(iter))
2780 return;
2781
2782 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2783 print_trace_header(m, iter);
2784
2785 if (!(trace_flags & TRACE_ITER_VERBOSE))
2786 print_lat_help_header(m);
2787}
2788
Jiri Olsa62b915f2010-04-02 19:01:22 +02002789void trace_default_header(struct seq_file *m)
2790{
2791 struct trace_iterator *iter = m->private;
2792
Jiri Olsaf56e7f82011-06-03 16:58:49 +02002793 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2794 return;
2795
Jiri Olsa62b915f2010-04-02 19:01:22 +02002796 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2797 /* print nothing if the buffers are empty */
2798 if (trace_empty(iter))
2799 return;
2800 print_trace_header(m, iter);
2801 if (!(trace_flags & TRACE_ITER_VERBOSE))
2802 print_lat_help_header(m);
2803 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05002804 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2805 if (trace_flags & TRACE_ITER_IRQ_INFO)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002806 print_func_help_header_irq(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002807 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002808 print_func_help_header(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002809 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02002810 }
2811}
2812
Steven Rostedte0a413f2011-09-29 21:26:16 -04002813static void test_ftrace_alive(struct seq_file *m)
2814{
2815 if (!ftrace_is_dead())
2816 return;
2817 seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
2818 seq_printf(m, "# MAY BE MISSING FUNCTION EVENTS\n");
2819}
2820
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002821#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002822static void show_snapshot_main_help(struct seq_file *m)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002823{
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002824 seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
2825 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2826 seq_printf(m, "# Takes a snapshot of the main buffer.\n");
Wang YanQingb9be6d02013-09-14 12:59:16 +08002827 seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002828	seq_printf(m, "# (Doesn't have to be '2'; works with any number that\n");
2829 seq_printf(m, "# is not a '0' or '1')\n");
2830}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002831
2832static void show_snapshot_percpu_help(struct seq_file *m)
2833{
2834 seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2835#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2836 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2837 seq_printf(m, "# Takes a snapshot of the main buffer for this cpu.\n");
2838#else
2839 seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
2840 seq_printf(m, "# Must use main snapshot file to allocate.\n");
2841#endif
2842 seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
2843 seq_printf(m, "# (Doesn't have to be '2' works with any number that\n");
2844 seq_printf(m, "# is not a '0' or '1')\n");
2845}
2846
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002847static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2848{
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05002849 if (iter->tr->allocated_snapshot)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002850 seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
2851 else
2852 seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
2853
2854 seq_printf(m, "# Snapshot commands:\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002855 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2856 show_snapshot_main_help(m);
2857 else
2858 show_snapshot_percpu_help(m);
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002859}
2860#else
2861/* Should never be called */
2862static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2863#endif
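
/*
 * A typical snapshot session, following the help text above (an
 * illustrative sketch, not output of this file):
 *
 *	# echo 1 > snapshot	allocate the buffer and take a snapshot
 *	# cat snapshot		read the frozen copy
 *	# echo 2 > snapshot	clear it, keeping the allocation
 *	# echo 0 > snapshot	free the snapshot buffer
 */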
2864
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002865static int s_show(struct seq_file *m, void *v)
2866{
2867 struct trace_iterator *iter = v;
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002868 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002869
2870 if (iter->ent == NULL) {
2871 if (iter->tr) {
2872 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2873 seq_puts(m, "#\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04002874 test_ftrace_alive(m);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002875 }
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002876 if (iter->snapshot && trace_empty(iter))
2877 print_snapshot_help(m, iter);
2878 else if (iter->trace && iter->trace->print_header)
Markus Metzger8bba1bf2008-11-25 09:12:31 +01002879 iter->trace->print_header(m);
Jiri Olsa62b915f2010-04-02 19:01:22 +02002880 else
2881 trace_default_header(m);
2882
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002883 } else if (iter->leftover) {
2884 /*
2885 * If we filled the seq_file buffer earlier, we
2886 * want to just show it now.
2887 */
2888 ret = trace_print_seq(m, &iter->seq);
2889
2890 /* ret should this time be zero, but you never know */
2891 iter->leftover = ret;
2892
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002893 } else {
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002894 print_trace_line(iter);
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002895 ret = trace_print_seq(m, &iter->seq);
2896 /*
2897 * If we overflow the seq_file buffer, then it will
2898 * ask us for this data again at start up.
2899 * Use that instead.
2900 * ret is 0 if seq_file write succeeded.
2901 * -1 otherwise.
2902 */
2903 iter->leftover = ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002904 }
2905
2906 return 0;
2907}
2908
Oleg Nesterov649e9c72013-07-23 17:25:54 +02002909/*
2910 * Should be used after trace_array_get(), trace_types_lock
2911 * ensures that i_cdev was already initialized.
2912 */
2913static inline int tracing_get_cpu(struct inode *inode)
2914{
2915 if (inode->i_cdev) /* See trace_create_cpu_file() */
2916 return (long)inode->i_cdev - 1;
2917 return RING_BUFFER_ALL_CPUS;
2918}
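
/*
 * Decoding sketch for the i_cdev trick above, assuming
 * trace_create_cpu_file() stashes "cpu + 1" in i_cdev:
 *
 *	i_cdev == NULL		-> RING_BUFFER_ALL_CPUS
 *	i_cdev == (void *)1	-> CPU 0
 *	i_cdev == (void *)2	-> CPU 1
 *
 * Biasing by one keeps NULL free to mean "all CPUs".
 */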
2919
James Morris88e9d342009-09-22 16:43:43 -07002920static const struct seq_operations tracer_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02002921 .start = s_start,
2922 .next = s_next,
2923 .stop = s_stop,
2924 .show = s_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002925};
2926
Ingo Molnare309b412008-05-12 21:20:51 +02002927static struct trace_iterator *
Oleg Nesterov6484c712013-07-23 17:26:10 +02002928__tracing_open(struct inode *inode, struct file *file, bool snapshot)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002929{
Oleg Nesterov6484c712013-07-23 17:26:10 +02002930 struct trace_array *tr = inode->i_private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002931 struct trace_iterator *iter;
Jiri Olsa50e18b92012-04-25 10:23:39 +02002932 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002933
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002934 if (tracing_disabled)
2935 return ERR_PTR(-ENODEV);
Steven Rostedt60a11772008-05-12 21:20:44 +02002936
Jiri Olsa50e18b92012-04-25 10:23:39 +02002937 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002938 if (!iter)
2939 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002940
Steven Rostedt6d158a82012-06-27 20:46:14 -04002941	iter->buffer_iter = kcalloc(num_possible_cpus(), sizeof(*iter->buffer_iter),
2942				    GFP_KERNEL);
Dan Carpenter93574fc2012-07-11 09:35:08 +03002943 if (!iter->buffer_iter)
2944 goto release;
2945
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002946 /*
2947 * We make a copy of the current tracer to avoid concurrent
2948 * changes on it while we are reading.
2949 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002950 mutex_lock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002951 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002952 if (!iter->trace)
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002953 goto fail;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002954
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002955 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002956
Li Zefan79f55992009-06-15 14:58:26 +08002957 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02002958 goto fail;
2959
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002960 iter->tr = tr;
2961
2962#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002963 /* Currently only the top directory has a snapshot */
2964 if (tr->current_trace->print_max || snapshot)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002965 iter->trace_buffer = &tr->max_buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002966 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002967#endif
2968 iter->trace_buffer = &tr->trace_buffer;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002969 iter->snapshot = snapshot;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002970 iter->pos = -1;
Oleg Nesterov6484c712013-07-23 17:26:10 +02002971 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002972 mutex_init(&iter->mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002973
Markus Metzger8bba1bf2008-11-25 09:12:31 +01002974 /* Notify the tracer early; before we stop tracing. */
2975 if (iter->trace && iter->trace->open)
Markus Metzgera93751c2008-12-11 13:53:26 +01002976 iter->trace->open(iter);
Markus Metzger8bba1bf2008-11-25 09:12:31 +01002977
Steven Rostedt12ef7d42008-11-12 17:52:38 -05002978 /* Annotate start of buffers if we had overruns */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002979 if (ring_buffer_overruns(iter->trace_buffer->buffer))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05002980 iter->iter_flags |= TRACE_FILE_ANNOTATE;
2981
David Sharp8be07092012-11-13 12:18:22 -08002982 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09002983 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08002984 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
2985
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002986 /* stop the trace while dumping if we are not opening "snapshot" */
2987 if (!iter->snapshot)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002988 tracing_stop_tr(tr);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002989
Steven Rostedtae3b5092013-01-23 15:22:59 -05002990 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002991 for_each_tracing_cpu(cpu) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002992 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002993 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07002994 }
2995 ring_buffer_read_prepare_sync();
2996 for_each_tracing_cpu(cpu) {
2997 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002998 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002999 }
3000 } else {
3001 cpu = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003002 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003003 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07003004 ring_buffer_read_prepare_sync();
3005 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003006 tracing_iter_reset(iter, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003007 }
3008
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003009 mutex_unlock(&trace_types_lock);
3010
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003011 return iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003012
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003013 fail:
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003014 mutex_unlock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003015 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003016 kfree(iter->buffer_iter);
Dan Carpenter93574fc2012-07-11 09:35:08 +03003017release:
Jiri Olsa50e18b92012-04-25 10:23:39 +02003018 seq_release_private(inode, file);
3019 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003020}
3021
3022int tracing_open_generic(struct inode *inode, struct file *filp)
3023{
Steven Rostedt60a11772008-05-12 21:20:44 +02003024 if (tracing_disabled)
3025 return -ENODEV;
3026
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003027 filp->private_data = inode->i_private;
3028 return 0;
3029}
3030
Geyslan G. Bem2e864212013-10-18 21:15:54 -03003031bool tracing_is_disabled(void)
3032{
3033	return (tracing_disabled) ? true : false;
3034}
3035
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003036/*
3037 * Open and update trace_array ref count.
3038 * Must have the current trace_array passed to it.
3039 */
Steven Rostedt (Red Hat)dcc30222013-07-02 20:30:52 -04003040static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003041{
3042 struct trace_array *tr = inode->i_private;
3043
3044 if (tracing_disabled)
3045 return -ENODEV;
3046
3047 if (trace_array_get(tr) < 0)
3048 return -ENODEV;
3049
3050 filp->private_data = inode->i_private;
3051
3052 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003053}
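
/*
 * Note: opens that succeed here hold a trace_array reference, so they
 * must be paired with a release that calls trace_array_put() (e.g.
 * tracing_release_generic_tr() below) to keep the ref count balanced.
 */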
3054
Hannes Eder4fd27352009-02-10 19:44:12 +01003055static int tracing_release(struct inode *inode, struct file *file)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003056{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003057 struct trace_array *tr = inode->i_private;
matt mooney907f2782010-09-27 19:04:53 -07003058 struct seq_file *m = file->private_data;
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003059 struct trace_iterator *iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003060 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003061
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003062 if (!(file->f_mode & FMODE_READ)) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003063 trace_array_put(tr);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003064 return 0;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003065 }
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003066
Oleg Nesterov6484c712013-07-23 17:26:10 +02003067 /* Writes do not use seq_file */
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003068 iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003069 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05003070
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003071 for_each_tracing_cpu(cpu) {
3072 if (iter->buffer_iter[cpu])
3073 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3074 }
3075
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003076 if (iter->trace && iter->trace->close)
3077 iter->trace->close(iter);
3078
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003079 if (!iter->snapshot)
3080 /* reenable tracing if it was previously enabled */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003081 tracing_start_tr(tr);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003082
3083 __trace_array_put(tr);
3084
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003085 mutex_unlock(&trace_types_lock);
3086
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003087 mutex_destroy(&iter->mutex);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003088 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003089 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003090 kfree(iter->buffer_iter);
Jiri Olsa50e18b92012-04-25 10:23:39 +02003091 seq_release_private(inode, file);
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003092
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003093 return 0;
3094}
3095
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003096static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3097{
3098 struct trace_array *tr = inode->i_private;
3099
3100 trace_array_put(tr);
3101 return 0;
3102}
3103
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003104static int tracing_single_release_tr(struct inode *inode, struct file *file)
3105{
3106 struct trace_array *tr = inode->i_private;
3107
3108 trace_array_put(tr);
3109
3110 return single_release(inode, file);
3111}
3112
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003113static int tracing_open(struct inode *inode, struct file *file)
3114{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003115 struct trace_array *tr = inode->i_private;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003116 struct trace_iterator *iter;
3117 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003118
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003119 if (trace_array_get(tr) < 0)
3120 return -ENODEV;
3121
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003122 /* If this file was open for write, then erase contents */
Oleg Nesterov6484c712013-07-23 17:26:10 +02003123 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3124 int cpu = tracing_get_cpu(inode);
3125
3126 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003127 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003128 else
Oleg Nesterov6484c712013-07-23 17:26:10 +02003129 tracing_reset(&tr->trace_buffer, cpu);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003130 }
3131
3132 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003133 iter = __tracing_open(inode, file, false);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003134 if (IS_ERR(iter))
3135 ret = PTR_ERR(iter);
3136 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
3137 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3138 }
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003139
3140 if (ret < 0)
3141 trace_array_put(tr);
3142
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003143 return ret;
3144}
3145
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003146/*
3147 * Some tracers are not suitable for instance buffers.
3148 * A tracer is always available for the global array (toplevel)
3149 * or if it explicitly states that it is.
3150 */
3151static bool
3152trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3153{
3154 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3155}
3156
3157/* Find the next tracer that this trace array may use */
3158static struct tracer *
3159get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3160{
3161 while (t && !trace_ok_for_array(t, tr))
3162 t = t->next;
3163
3164 return t;
3165}
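
/*
 * t_start()/t_next() below walk the global trace_types list through
 * this helper, so a sub-buffer instance only ever lists tracers that
 * opted in via ->allow_instances (the toplevel array sees them all).
 */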
3166
Ingo Molnare309b412008-05-12 21:20:51 +02003167static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003168t_next(struct seq_file *m, void *v, loff_t *pos)
3169{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003170 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003171 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003172
3173 (*pos)++;
3174
3175 if (t)
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003176 t = get_tracer_for_array(tr, t->next);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003177
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003178 return t;
3179}
3180
3181static void *t_start(struct seq_file *m, loff_t *pos)
3182{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003183 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003184 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003185 loff_t l = 0;
3186
3187 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003188
3189 t = get_tracer_for_array(tr, trace_types);
3190 for (; t && l < *pos; t = t_next(m, t, &l))
3191 ;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003192
3193 return t;
3194}
3195
3196static void t_stop(struct seq_file *m, void *p)
3197{
3198 mutex_unlock(&trace_types_lock);
3199}
3200
3201static int t_show(struct seq_file *m, void *v)
3202{
3203 struct tracer *t = v;
3204
3205 if (!t)
3206 return 0;
3207
3208 seq_printf(m, "%s", t->name);
3209 if (t->next)
3210 seq_putc(m, ' ');
3211 else
3212 seq_putc(m, '\n');
3213
3214 return 0;
3215}
3216
James Morris88e9d342009-09-22 16:43:43 -07003217static const struct seq_operations show_traces_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003218 .start = t_start,
3219 .next = t_next,
3220 .stop = t_stop,
3221 .show = t_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003222};
3223
3224static int show_traces_open(struct inode *inode, struct file *file)
3225{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003226 struct trace_array *tr = inode->i_private;
3227 struct seq_file *m;
3228 int ret;
3229
Steven Rostedt60a11772008-05-12 21:20:44 +02003230 if (tracing_disabled)
3231 return -ENODEV;
3232
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003233 ret = seq_open(file, &show_traces_seq_ops);
3234 if (ret)
3235 return ret;
3236
3237 m = file->private_data;
3238 m->private = tr;
3239
3240 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003241}
3242
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003243static ssize_t
3244tracing_write_stub(struct file *filp, const char __user *ubuf,
3245 size_t count, loff_t *ppos)
3246{
3247 return count;
3248}
3249
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003250loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08003251{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003252 int ret;
3253
Slava Pestov364829b2010-11-24 15:13:16 -08003254 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003255 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08003256 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003257 file->f_pos = ret = 0;
3258
3259 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08003260}
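
/*
 * Read opens go through the seq_file iterator, hence seq_lseek();
 * write-only opens have no seq_file state, so the position is simply
 * reset to zero above.
 */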
3261
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003262static const struct file_operations tracing_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003263 .open = tracing_open,
3264 .read = seq_read,
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003265 .write = tracing_write_stub,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003266 .llseek = tracing_lseek,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003267 .release = tracing_release,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003268};
3269
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003270static const struct file_operations show_traces_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003271 .open = show_traces_open,
3272 .read = seq_read,
3273 .release = seq_release,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003274 .llseek = seq_lseek,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003275};
3276
Ingo Molnar36dfe922008-05-12 21:20:52 +02003277/*
Ingo Molnar36dfe922008-05-12 21:20:52 +02003278 * The tracer itself will not take this lock, but still we want
3279 * to provide a consistent cpumask to user-space:
3280 */
3281static DEFINE_MUTEX(tracing_cpumask_update_lock);
3282
3283/*
3284 * Temporary storage for the character representation of the
3285 * CPU bitmask (and one more byte for the newline):
3286 */
3287static char mask_str[NR_CPUS + 1];
3288
Ingo Molnarc7078de2008-05-12 21:20:52 +02003289static ssize_t
3290tracing_cpumask_read(struct file *filp, char __user *ubuf,
3291 size_t count, loff_t *ppos)
3292{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003293 struct trace_array *tr = file_inode(filp)->i_private;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003294 int len;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003295
3296 mutex_lock(&tracing_cpumask_update_lock);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003297
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003298 len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003299 if (count - len < 2) {
3300 count = -EINVAL;
3301 goto out_err;
3302 }
3303 len += sprintf(mask_str + len, "\n");
3304 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3305
3306out_err:
Ingo Molnarc7078de2008-05-12 21:20:52 +02003307 mutex_unlock(&tracing_cpumask_update_lock);
3308
3309 return count;
3310}
3311
3312static ssize_t
3313tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3314 size_t count, loff_t *ppos)
3315{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003316 struct trace_array *tr = file_inode(filp)->i_private;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303317 cpumask_var_t tracing_cpumask_new;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003318 int err, cpu;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303319
3320 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3321 return -ENOMEM;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003322
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303323 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003324 if (err)
3325 goto err_unlock;
3326
Li Zefan215368e2009-06-15 10:56:42 +08003327 mutex_lock(&tracing_cpumask_update_lock);
3328
Steven Rostedta5e25882008-12-02 15:34:05 -05003329 local_irq_disable();
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05003330 arch_spin_lock(&tr->max_lock);
Steven Rostedtab464282008-05-12 21:21:00 +02003331 for_each_tracing_cpu(cpu) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02003332 /*
3333 * Increase/decrease the disabled counter if we are
3334 * about to flip a bit in the cpumask:
3335 */
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003336 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303337 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003338 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3339 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003340 }
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003341 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303342 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003343 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3344 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003345 }
3346 }
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05003347 arch_spin_unlock(&tr->max_lock);
Steven Rostedta5e25882008-12-02 15:34:05 -05003348 local_irq_enable();
Ingo Molnar36dfe922008-05-12 21:20:52 +02003349
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003350 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003351
Ingo Molnarc7078de2008-05-12 21:20:52 +02003352 mutex_unlock(&tracing_cpumask_update_lock);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303353 free_cpumask_var(tracing_cpumask_new);
Ingo Molnarc7078de2008-05-12 21:20:52 +02003354
Ingo Molnarc7078de2008-05-12 21:20:52 +02003355 return count;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003356
3357err_unlock:
Li Zefan215368e2009-06-15 10:56:42 +08003358 free_cpumask_var(tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003359
3360 return err;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003361}
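
/*
 * Usage sketch (hex bitmask, as parsed by cpumask_parse_user()):
 *
 *	# echo 07 > tracing_cpumask	trace only CPUs 0-2
 *
 * Bits being cleared bump the per-cpu "disabled" count and stop ring
 * buffer recording on that CPU; bits being set do the reverse.
 */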
3362
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003363static const struct file_operations tracing_cpumask_fops = {
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003364 .open = tracing_open_generic_tr,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003365 .read = tracing_cpumask_read,
3366 .write = tracing_cpumask_write,
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003367 .release = tracing_release_generic_tr,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003368 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003369};
3370
Li Zefanfdb372e2009-12-08 11:15:59 +08003371static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003372{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003373 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003374 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003375 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003376 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003377
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003378 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003379 tracer_flags = tr->current_trace->flags->val;
3380 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003381
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003382 for (i = 0; trace_options[i]; i++) {
3383 if (trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08003384 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003385 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003386 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003387 }
3388
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003389 for (i = 0; trace_opts[i].name; i++) {
3390 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08003391 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003392 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003393 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003394 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003395 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003396
Li Zefanfdb372e2009-12-08 11:15:59 +08003397 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003398}
3399
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003400static int __set_tracer_option(struct trace_array *tr,
Li Zefan8d18eaa2009-12-08 11:17:06 +08003401 struct tracer_flags *tracer_flags,
3402 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003403{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003404 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003405 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003406
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003407 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003408 if (ret)
3409 return ret;
3410
3411 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08003412 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003413 else
Zhaolei77708412009-08-07 18:53:21 +08003414 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003415 return 0;
3416}
3417
Li Zefan8d18eaa2009-12-08 11:17:06 +08003418/* Try to assign a tracer specific option */
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003419static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
Li Zefan8d18eaa2009-12-08 11:17:06 +08003420{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003421 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003422 struct tracer_flags *tracer_flags = trace->flags;
3423 struct tracer_opt *opts = NULL;
3424 int i;
3425
3426 for (i = 0; tracer_flags->opts[i].name; i++) {
3427 opts = &tracer_flags->opts[i];
3428
3429 if (strcmp(cmp, opts->name) == 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003430 return __set_tracer_option(tr, trace->flags, opts, neg);
Li Zefan8d18eaa2009-12-08 11:17:06 +08003431 }
3432
3433 return -EINVAL;
3434}
3435
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003436/* Some tracers require overwrite to stay enabled */
3437int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3438{
3439 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3440 return -1;
3441
3442 return 0;
3443}
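
/*
 * Example wiring (a sketch; the actual hookup lives in the individual
 * tracers): a latency tracer that relies on ring-buffer overwrite can
 * delegate its ->flag_changed callback to this helper, making
 * "echo nooverwrite > trace_options" fail with -EINVAL while that
 * tracer is active (see set_tracer_flag() below).
 */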
3444
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003445int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003446{
3447 /* do nothing if flag is already set */
3448 if (!!(trace_flags & mask) == !!enabled)
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003449 return 0;
3450
3451 /* Give the tracer a chance to approve the change */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003452 if (tr->current_trace->flag_changed)
Steven Rostedt (Red Hat)bf6065b2014-01-10 17:51:01 -05003453 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003454 return -EINVAL;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003455
3456 if (enabled)
3457 trace_flags |= mask;
3458 else
3459 trace_flags &= ~mask;
Li Zefane870e9a2010-07-02 11:07:32 +08003460
3461 if (mask == TRACE_ITER_RECORD_CMD)
3462 trace_event_enable_cmd_record(enabled);
David Sharp750912f2010-12-08 13:46:47 -08003463
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003464 if (mask == TRACE_ITER_OVERWRITE) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003465 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003466#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003467 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003468#endif
3469 }
Steven Rostedt81698832012-10-11 10:15:05 -04003470
3471 if (mask == TRACE_ITER_PRINTK)
3472 trace_printk_start_stop_comm(enabled);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003473
3474 return 0;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003475}
3476
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003477static int trace_set_options(struct trace_array *tr, char *option)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003478{
Li Zefan8d18eaa2009-12-08 11:17:06 +08003479 char *cmp;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003480 int neg = 0;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003481 int ret = -ENODEV;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003482 int i;
3483
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003484 cmp = strstrip(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003485
Li Zefan8d18eaa2009-12-08 11:17:06 +08003486 if (strncmp(cmp, "no", 2) == 0) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003487 neg = 1;
3488 cmp += 2;
3489 }
3490
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003491 mutex_lock(&trace_types_lock);
3492
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003493 for (i = 0; trace_options[i]; i++) {
Li Zefan8d18eaa2009-12-08 11:17:06 +08003494 if (strcmp(cmp, trace_options[i]) == 0) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003495 ret = set_tracer_flag(tr, 1 << i, !neg);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003496 break;
3497 }
3498 }
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003499
3500 /* If no option could be set, test the specific tracer options */
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003501 if (!trace_options[i])
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003502 ret = set_tracer_option(tr, cmp, neg);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003503
3504 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003505
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003506 return ret;
3507}
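
/*
 * Example (a sketch, using stock option names from trace_options[]):
 *
 *	echo print-parent   > trace_options	-> set_tracer_flag(tr, ..., 1)
 *	echo noprint-parent > trace_options	-> set_tracer_flag(tr, ..., 0)
 *
 * Names not found in trace_options[] fall through to the current
 * tracer's private flags via set_tracer_option().
 */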
3508
3509static ssize_t
3510tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3511 size_t cnt, loff_t *ppos)
3512{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003513 struct seq_file *m = filp->private_data;
3514 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003515 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003516 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003517
3518 if (cnt >= sizeof(buf))
3519 return -EINVAL;
3520
3521 if (copy_from_user(&buf, ubuf, cnt))
3522 return -EFAULT;
3523
Steven Rostedta8dd2172013-01-09 20:54:17 -05003524 buf[cnt] = 0;
3525
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003526 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003527 if (ret < 0)
3528 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003529
Jiri Olsacf8517c2009-10-23 19:36:16 -04003530 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003531
3532 return cnt;
3533}
3534
Li Zefanfdb372e2009-12-08 11:15:59 +08003535static int tracing_trace_options_open(struct inode *inode, struct file *file)
3536{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003537 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003538 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003539
Li Zefanfdb372e2009-12-08 11:15:59 +08003540 if (tracing_disabled)
3541 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003542
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003543 if (trace_array_get(tr) < 0)
3544 return -ENODEV;
3545
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003546 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3547 if (ret < 0)
3548 trace_array_put(tr);
3549
3550 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08003551}
3552
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003553static const struct file_operations tracing_iter_fops = {
Li Zefanfdb372e2009-12-08 11:15:59 +08003554 .open = tracing_trace_options_open,
3555 .read = seq_read,
3556 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003557 .release = tracing_single_release_tr,
Steven Rostedtee6bce52008-11-12 17:52:37 -05003558 .write = tracing_trace_options_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003559};
3560
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003561static const char readme_msg[] =
3562 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003563 "# echo 0 > tracing_on : quick way to disable tracing\n"
3564 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3565 " Important files:\n"
3566 " trace\t\t\t- The static contents of the buffer\n"
3567 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3568 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3569 " current_tracer\t- function and latency tracers\n"
3570 " available_tracers\t- list of configured tracers for current_tracer\n"
3571 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3572 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3573 " trace_clock\t\t-change the clock used to order events\n"
3574 " local: Per cpu clock but may not be synced across CPUs\n"
3575 " global: Synced across CPUs but slows tracing down.\n"
3576 " counter: Not a clock, but just an increment\n"
3577 " uptime: Jiffy counter from time of boot\n"
3578 " perf: Same clock that perf events use\n"
3579#ifdef CONFIG_X86_64
3580 " x86-tsc: TSC cycle counter\n"
3581#endif
3582 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3583 " tracing_cpumask\t- Limit which CPUs to trace\n"
3584 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3585 "\t\t\t Remove sub-buffer with rmdir\n"
3586 " trace_options\t\t- Set format or modify how tracing happens\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003587	"\t\t\t Disable an option by prefixing 'no' to the\n"
3588 "\t\t\t option name\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003589#ifdef CONFIG_DYNAMIC_FTRACE
3590 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003591 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3592 "\t\t\t functions\n"
3593 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3594 "\t modules: Can select a group via module\n"
3595 "\t Format: :mod:<module-name>\n"
3596 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3597 "\t triggers: a command to perform when function is hit\n"
3598 "\t Format: <function>:<trigger>[:count]\n"
3599 "\t trigger: traceon, traceoff\n"
3600 "\t\t enable_event:<system>:<event>\n"
3601 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003602#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003603 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003604#endif
3605#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003606 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003607#endif
Steven Rostedt (Red Hat)17a280e2014-04-10 22:43:37 -04003608 "\t\t dump\n"
3609 "\t\t cpudump\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003610 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3611 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3612 "\t The first one will disable tracing every time do_fault is hit\n"
3613 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3614 "\t The first time do trap is hit and it disables tracing, the\n"
3615 "\t counter will decrement to 2. If tracing is already disabled,\n"
3616 "\t the counter will not decrement. It only decrements when the\n"
3617 "\t trigger did work\n"
3618 "\t To remove trigger without count:\n"
3619 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3620 "\t To remove trigger with a count:\n"
3621 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003622 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003623 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3624 "\t modules: Can select a group via module command :mod:\n"
3625 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003626#endif /* CONFIG_DYNAMIC_FTRACE */
3627#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003628	" set_ftrace_pid\t- Write pid(s) to restrict tracing to those pids\n"
3629 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003630#endif
3631#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3632 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3633 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3634#endif
3635#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003636 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3637 "\t\t\t snapshot buffer. Read the contents for more\n"
3638 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003639#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003640#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003641 " stack_trace\t\t- Shows the max stack trace when active\n"
3642 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003643 "\t\t\t Write into this file to reset the max size (trigger a\n"
3644 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003645#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003646 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3647 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003648#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003649#endif /* CONFIG_STACK_TRACER */
Tom Zanussi26f25562014-01-17 15:11:44 -06003650 " events/\t\t- Directory containing all trace event subsystems:\n"
3651 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3652 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003653 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3654 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003655 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003656 " events/<system>/<event>/\t- Directory containing control files for\n"
3657 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003658 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3659 " filter\t\t- If set, only events passing filter are traced\n"
3660 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003661 "\t Format: <trigger>[:count][if <filter>]\n"
3662 "\t trigger: traceon, traceoff\n"
3663 "\t enable_event:<system>:<event>\n"
3664 "\t disable_event:<system>:<event>\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003665#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003666 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003667#endif
3668#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003669 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003670#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003671 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3672 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3673 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3674 "\t events/block/block_unplug/trigger\n"
3675 "\t The first disables tracing every time block_unplug is hit.\n"
3676 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3677 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3678 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3679 "\t Like function triggers, the counter is only decremented if it\n"
3680 "\t enabled or disabled tracing.\n"
3681 "\t To remove a trigger without a count:\n"
3682 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3683 "\t To remove a trigger with a count:\n"
3684 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3685 "\t Filters can be ignored when removing a trigger.\n"
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003686;
3687
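/*
 * Hedged usage sketch: the help text above is exposed read-only through the
 * 'README' file in the tracing directory (the path below assumes the usual
 * debugfs mount point):
 *
 *	cat /sys/kernel/debug/tracing/README
 */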
3688static ssize_t
3689tracing_readme_read(struct file *filp, char __user *ubuf,
3690 size_t cnt, loff_t *ppos)
3691{
3692 return simple_read_from_buffer(ubuf, cnt, ppos,
3693 readme_msg, strlen(readme_msg));
3694}
3695
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003696static const struct file_operations tracing_readme_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003697 .open = tracing_open_generic,
3698 .read = tracing_readme_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003699 .llseek = generic_file_llseek,
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003700};
3701
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003702static ssize_t
Avadh Patel69abe6a2009-04-10 16:04:48 -04003703tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
3704 size_t cnt, loff_t *ppos)
3705{
3706 char *buf_comm;
3707 char *file_buf;
3708 char *buf;
3709 int len = 0;
3710 int pid;
3711 int i;
3712
3713 file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
3714 if (!file_buf)
3715 return -ENOMEM;
3716
3717 buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
3718 if (!buf_comm) {
3719 kfree(file_buf);
3720 return -ENOMEM;
3721 }
3722
3723 buf = file_buf;
3724
3725 for (i = 0; i < SAVED_CMDLINES; i++) {
3726 int r;
3727
3728 pid = map_cmdline_to_pid[i];
3729 if (pid == -1 || pid == NO_CMDLINE_MAP)
3730 continue;
3731
3732 trace_find_cmdline(pid, buf_comm);
3733 r = sprintf(buf, "%d %s\n", pid, buf_comm);
3734 buf += r;
3735 len += r;
3736 }
3737
3738 len = simple_read_from_buffer(ubuf, cnt, ppos,
3739 file_buf, len);
3740
3741 kfree(file_buf);
3742 kfree(buf_comm);
3743
3744 return len;
3745}
3746
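/*
 * Hedged example: each record produced above is "<pid> <comm>", so reading
 * the file backed by these handlers (wired up elsewhere in this file,
 * typically as 'saved_cmdlines') looks roughly like:
 *
 *	$ cat /sys/kernel/debug/tracing/saved_cmdlines
 *	1 systemd
 *	1234 bash
 */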
3747static const struct file_operations tracing_saved_cmdlines_fops = {
3748 .open = tracing_open_generic,
3749 .read = tracing_saved_cmdlines_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003750 .llseek = generic_file_llseek,
Avadh Patel69abe6a2009-04-10 16:04:48 -04003751};
3752
3753static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003754tracing_set_trace_read(struct file *filp, char __user *ubuf,
3755 size_t cnt, loff_t *ppos)
3756{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003757 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08003758 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003759 int r;
3760
3761 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003762 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003763 mutex_unlock(&trace_types_lock);
3764
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003765 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003766}
3767
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02003768int tracer_init(struct tracer *t, struct trace_array *tr)
3769{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003770 tracing_reset_online_cpus(&tr->trace_buffer);
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02003771 return t->init(tr);
3772}
3773
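/*
 * Illustrative sketch only: a minimal tracer whose ->init() callback is
 * reached through tracer_init() above once it is selected. The "mini" name
 * and the trivial callback are hypothetical; a real tracer of this era is
 * registered with register_tracer() during boot.
 *
 *	static int mini_init(struct trace_array *tr)
 *	{
 *		return 0;
 *	}
 *
 *	static struct tracer mini_tracer __read_mostly = {
 *		.name	= "mini",
 *		.init	= mini_init,
 *	};
 */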
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003774static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003775{
3776 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05003777
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003778 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003779 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003780}
3781
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003782#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookad60da502012-10-17 11:56:16 +09003783/* resize @trace_buf's entries to match the per-cpu entry counts of @size_buf */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003784static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
3785 struct trace_buffer *size_buf, int cpu_id)
Hiraku Toyookad60da502012-10-17 11:56:16 +09003786{
3787 int cpu, ret = 0;
3788
3789 if (cpu_id == RING_BUFFER_ALL_CPUS) {
3790 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003791 ret = ring_buffer_resize(trace_buf->buffer,
3792 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
Hiraku Toyookad60da502012-10-17 11:56:16 +09003793 if (ret < 0)
3794 break;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003795 per_cpu_ptr(trace_buf->data, cpu)->entries =
3796 per_cpu_ptr(size_buf->data, cpu)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09003797 }
3798 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003799 ret = ring_buffer_resize(trace_buf->buffer,
3800 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
Hiraku Toyookad60da502012-10-17 11:56:16 +09003801 if (ret == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003802 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
3803 per_cpu_ptr(size_buf->data, cpu_id)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09003804 }
3805
3806 return ret;
3807}
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003808#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09003809
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003810static int __tracing_resize_ring_buffer(struct trace_array *tr,
3811 unsigned long size, int cpu)
Steven Rostedt73c51622009-03-11 13:42:01 -04003812{
3813 int ret;
3814
3815 /*
3816	 * If the kernel or the user changes the size of the ring buffer,
Steven Rostedta123c522009-03-12 11:21:08 -04003817 * we use the size that was given, and we can forget about
3818 * expanding it later.
Steven Rostedt73c51622009-03-11 13:42:01 -04003819 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05003820 ring_buffer_expanded = true;
Steven Rostedt73c51622009-03-11 13:42:01 -04003821
Steven Rostedtb382ede62012-10-10 21:44:34 -04003822 /* May be called before buffers are initialized */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003823 if (!tr->trace_buffer.buffer)
Steven Rostedtb382ede62012-10-10 21:44:34 -04003824 return 0;
3825
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003826 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003827 if (ret < 0)
3828 return ret;
3829
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003830#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003831 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
3832 !tr->current_trace->use_max_tr)
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003833 goto out;
3834
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003835 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003836 if (ret < 0) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003837 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
3838 &tr->trace_buffer, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003839 if (r < 0) {
Steven Rostedta123c522009-03-12 11:21:08 -04003840 /*
3841	 * AARGH! We are left with a max buffer of a
3842	 * different size!!!!
3843 * The max buffer is our "snapshot" buffer.
3844 * When a tracer needs a snapshot (one of the
3845 * latency tracers), it swaps the max buffer
3846	 * with the saved snapshot. We succeeded in
3847	 * updating the size of the main buffer, but failed to
3848 * update the size of the max buffer. But when we tried
3849 * to reset the main buffer to the original size, we
3850 * failed there too. This is very unlikely to
3851 * happen, but if it does, warn and kill all
3852 * tracing.
3853 */
Steven Rostedt73c51622009-03-11 13:42:01 -04003854 WARN_ON(1);
3855 tracing_disabled = 1;
3856 }
3857 return ret;
3858 }
3859
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003860 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003861 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003862 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003863 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003864
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003865 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003866#endif /* CONFIG_TRACER_MAX_TRACE */
3867
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003868 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003869 set_buffer_entries(&tr->trace_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003870 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003871 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04003872
3873 return ret;
3874}
3875
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003876static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
3877 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003878{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07003879 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003880
3881 mutex_lock(&trace_types_lock);
3882
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003883 if (cpu_id != RING_BUFFER_ALL_CPUS) {
3884	/* make sure this cpu is enabled in the mask */
3885 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
3886 ret = -EINVAL;
3887 goto out;
3888 }
3889 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003890
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003891 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003892 if (ret < 0)
3893 ret = -ENOMEM;
3894
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003895out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003896 mutex_unlock(&trace_types_lock);
3897
3898 return ret;
3899}
3900
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003901
Steven Rostedt1852fcc2009-03-11 14:33:00 -04003902/**
3903 * tracing_update_buffers - used by tracing facility to expand ring buffers
3904 *
3905 * To save memory when tracing is never used on a system that has it
3906 * configured in, the ring buffers are set to a minimum size. But once
3907 * a user starts to use the tracing facility, they need to grow
3908 * to their default size.
3909 *
3910 * This function is to be called when a tracer is about to be used.
3911 */
3912int tracing_update_buffers(void)
3913{
3914 int ret = 0;
3915
Steven Rostedt1027fcb2009-03-12 11:33:20 -04003916 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04003917 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003918 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003919 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04003920 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04003921
3922 return ret;
3923}
3924
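/*
 * Hedged caller sketch: code that is about to turn tracing on typically
 * expands the buffers first and bails out on failure (the error handling
 * here is illustrative):
 *
 *	ret = tracing_update_buffers();
 *	if (ret < 0)
 *		return ret;
 *	// ... proceed to enable the tracer or event ...
 */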
Steven Rostedt577b7852009-02-26 23:43:05 -05003925struct trace_option_dentry;
3926
3927static struct trace_option_dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003928create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05003929
3930static void
3931destroy_trace_option_files(struct trace_option_dentry *topts);
3932
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05003933/*
3934 * Used to clear out the tracer before deletion of an instance.
3935 * Must have trace_types_lock held.
3936 */
3937static void tracing_set_nop(struct trace_array *tr)
3938{
3939 if (tr->current_trace == &nop_trace)
3940 return;
3941
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05003942 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05003943
3944 if (tr->current_trace->reset)
3945 tr->current_trace->reset(tr);
3946
3947 tr->current_trace = &nop_trace;
3948}
3949
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003950static int tracing_set_tracer(struct trace_array *tr, const char *buf)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003951{
Steven Rostedt577b7852009-02-26 23:43:05 -05003952 static struct trace_option_dentry *topts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003953 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003954#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05003955 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003956#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01003957 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003958
Steven Rostedt1027fcb2009-03-12 11:33:20 -04003959 mutex_lock(&trace_types_lock);
3960
Steven Rostedt73c51622009-03-11 13:42:01 -04003961 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003962 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003963 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04003964 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01003965 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04003966 ret = 0;
3967 }
3968
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003969 for (t = trace_types; t; t = t->next) {
3970 if (strcmp(t->name, buf) == 0)
3971 break;
3972 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02003973 if (!t) {
3974 ret = -EINVAL;
3975 goto out;
3976 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003977 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003978 goto out;
3979
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003980 /* Some tracers are only allowed for the top level buffer */
3981 if (!trace_ok_for_array(t, tr)) {
3982 ret = -EINVAL;
3983 goto out;
3984 }
3985
Steven Rostedt9f029e82008-11-12 15:24:24 -05003986 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003987
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05003988 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003989
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003990 if (tr->current_trace->reset)
3991 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05003992
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003993 /* Current trace needs to be nop_trace before synchronize_sched */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003994 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05003995
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05003996#ifdef CONFIG_TRACER_MAX_TRACE
3997 had_max_tr = tr->allocated_snapshot;
Steven Rostedt34600f02013-01-22 13:35:11 -05003998
3999 if (had_max_tr && !t->use_max_tr) {
4000 /*
4001 * We need to make sure that the update_max_tr sees that
4002 * current_trace changed to nop_trace to keep it from
4003 * swapping the buffers after we resize it.
4004	 * update_max_tr() is called with interrupts disabled,
4005	 * so a synchronize_sched() is sufficient.
4006 */
4007 synchronize_sched();
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004008 free_snapshot(tr);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004009 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004010#endif
Steven Rostedt (Red Hat)f1b21c92014-01-14 12:33:33 -05004011 /* Currently, only the top instance has options */
4012 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
4013 destroy_trace_option_files(topts);
4014 topts = create_trace_option_files(tr, t);
4015 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004016
4017#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05004018 if (t->use_max_tr && !had_max_tr) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004019 ret = alloc_snapshot(tr);
Hiraku Toyookad60da502012-10-17 11:56:16 +09004020 if (ret < 0)
4021 goto out;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004022 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004023#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05004024
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004025 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004026 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004027 if (ret)
4028 goto out;
4029 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004030
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004031 tr->current_trace = t;
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004032 tr->current_trace->enabled++;
Steven Rostedt9f029e82008-11-12 15:24:24 -05004033 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004034 out:
4035 mutex_unlock(&trace_types_lock);
4036
Peter Zijlstrad9e54072008-11-01 19:57:37 +01004037 return ret;
4038}
4039
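/*
 * Hedged usage sketch: tracing_set_tracer() is driven from userspace via the
 * 'current_tracer' file (the file creation lives elsewhere in this file; the
 * path assumes the usual debugfs mount):
 *
 *	echo function > /sys/kernel/debug/tracing/current_tracer
 *	cat /sys/kernel/debug/tracing/current_tracer
 */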
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004040static ssize_t
4041tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4042 size_t cnt, loff_t *ppos)
4043{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004044 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08004045 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004046 int i;
4047 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004048 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004049
Steven Rostedt60063a62008-10-28 10:44:24 -04004050 ret = cnt;
4051
Li Zefanee6c2c12009-09-18 14:06:47 +08004052 if (cnt > MAX_TRACER_SIZE)
4053 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004054
4055 if (copy_from_user(&buf, ubuf, cnt))
4056 return -EFAULT;
4057
4058 buf[cnt] = 0;
4059
4060 /* strip ending whitespace. */
4061 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4062 buf[i] = 0;
4063
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004064 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004065 if (err)
4066 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004067
Jiri Olsacf8517c2009-10-23 19:36:16 -04004068 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004069
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004070 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004071}
4072
4073static ssize_t
4074tracing_max_lat_read(struct file *filp, char __user *ubuf,
4075 size_t cnt, loff_t *ppos)
4076{
4077 unsigned long *ptr = filp->private_data;
4078 char buf[64];
4079 int r;
4080
Steven Rostedtcffae432008-05-12 21:21:00 +02004081 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004082 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02004083 if (r > sizeof(buf))
4084 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004085 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004086}
4087
4088static ssize_t
4089tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4090 size_t cnt, loff_t *ppos)
4091{
Hannes Eder5e398412009-02-10 19:44:34 +01004092 unsigned long *ptr = filp->private_data;
Hannes Eder5e398412009-02-10 19:44:34 +01004093 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004094 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004095
Peter Huewe22fe9b52011-06-07 21:58:27 +02004096 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4097 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004098 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004099
4100 *ptr = val * 1000;
4101
4102 return cnt;
4103}
4104
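/*
 * Hedged usage sketch: these handlers typically back the
 * 'tracing_max_latency' file. Values are exchanged in microseconds but
 * stored in nanoseconds (note the "val * 1000" above), so resetting the
 * recorded maximum looks like:
 *
 *	echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 */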
Steven Rostedtb3806b42008-05-12 21:20:46 +02004105static int tracing_open_pipe(struct inode *inode, struct file *filp)
4106{
Oleg Nesterov15544202013-07-23 17:25:57 +02004107 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004108 struct trace_iterator *iter;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004109 int ret = 0;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004110
4111 if (tracing_disabled)
4112 return -ENODEV;
4113
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004114 if (trace_array_get(tr) < 0)
4115 return -ENODEV;
4116
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004117 mutex_lock(&trace_types_lock);
4118
Steven Rostedtb3806b42008-05-12 21:20:46 +02004119 /* create a buffer to store the information to pass to userspace */
4120 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004121 if (!iter) {
4122 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004123 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004124 goto out;
4125 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004126
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004127 /*
4128 * We make a copy of the current tracer to avoid concurrent
4129	 * changes to it while we are reading.
4130 */
4131 iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
4132 if (!iter->trace) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004133 ret = -ENOMEM;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004134 goto fail;
4135 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004136 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004137
4138 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4139 ret = -ENOMEM;
4140 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10304141 }
4142
Steven Rostedta3097202008-11-07 22:36:02 -05004143 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10304144 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05004145
Steven Rostedt112f38a72009-06-01 15:16:05 -04004146 if (trace_flags & TRACE_ITER_LATENCY_FMT)
4147 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4148
David Sharp8be07092012-11-13 12:18:22 -08004149 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09004150 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08004151 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4152
Oleg Nesterov15544202013-07-23 17:25:57 +02004153 iter->tr = tr;
4154 iter->trace_buffer = &tr->trace_buffer;
4155 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004156 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004157 filp->private_data = iter;
4158
Steven Rostedt107bad82008-05-12 21:21:01 +02004159 if (iter->trace->pipe_open)
4160 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02004161
Arnd Bergmannb4447862010-07-07 23:40:11 +02004162 nonseekable_open(inode, filp);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004163out:
4164 mutex_unlock(&trace_types_lock);
4165 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004166
4167fail:
4168 kfree(iter->trace);
4169 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004170 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004171 mutex_unlock(&trace_types_lock);
4172 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004173}
4174
4175static int tracing_release_pipe(struct inode *inode, struct file *file)
4176{
4177 struct trace_iterator *iter = file->private_data;
Oleg Nesterov15544202013-07-23 17:25:57 +02004178 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004179
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004180 mutex_lock(&trace_types_lock);
4181
Steven Rostedt29bf4a52009-12-09 12:37:43 -05004182 if (iter->trace->pipe_close)
Steven Rostedtc521efd2009-12-07 09:06:24 -05004183 iter->trace->pipe_close(iter);
4184
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004185 mutex_unlock(&trace_types_lock);
4186
Rusty Russell44623442009-01-01 10:12:23 +10304187 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004188 mutex_destroy(&iter->mutex);
4189 kfree(iter->trace);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004190 kfree(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004191
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004192 trace_array_put(tr);
4193
Steven Rostedtb3806b42008-05-12 21:20:46 +02004194 return 0;
4195}
4196
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004197static unsigned int
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004198trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004199{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004200	/* Iterators are static; they should be either filled or empty */
4201 if (trace_buffer_iter(iter, iter->cpu_file))
4202 return POLLIN | POLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004203
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004204 if (trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004205 /*
4206 * Always select as readable when in blocking mode
4207 */
4208 return POLLIN | POLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004209 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004210 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004211 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004212}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004213
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004214static unsigned int
4215tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4216{
4217 struct trace_iterator *iter = filp->private_data;
4218
4219 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004220}
4221
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004222/* Must be called with trace_types_lock mutex held. */
4223static int tracing_wait_pipe(struct file *filp)
4224{
4225 struct trace_iterator *iter = filp->private_data;
4226
4227 while (trace_empty(iter)) {
4228
4229 if ((filp->f_flags & O_NONBLOCK)) {
4230 return -EAGAIN;
4231 }
4232
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004233 /*
Liu Bo250bfd32013-01-14 10:54:11 +08004234		 * We block until we have read something and tracing is disabled.
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004235		 * If tracing is disabled but we have never read anything, we
4236		 * keep blocking. This allows a user to cat this file, and
4237		 * then enable tracing. But after we have read something,
4238		 * we give an EOF when tracing is again disabled.
4239 *
4240 * iter->pos will be 0 if we haven't read anything.
4241 */
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04004242 if (!tracing_is_on() && iter->pos)
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004243 break;
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04004244
4245 mutex_unlock(&iter->mutex);
4246
Steven Rostedt (Red Hat)b1169cc2014-04-29 17:54:37 -04004247 wait_on_pipe(iter);
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04004248
4249 mutex_lock(&iter->mutex);
4250
4251 if (signal_pending(current))
4252 return -EINTR;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004253 }
4254
4255 return 1;
4256}
4257
Steven Rostedtb3806b42008-05-12 21:20:46 +02004258/*
4259 * Consumer reader.
4260 */
4261static ssize_t
4262tracing_read_pipe(struct file *filp, char __user *ubuf,
4263 size_t cnt, loff_t *ppos)
4264{
4265 struct trace_iterator *iter = filp->private_data;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004266 struct trace_array *tr = iter->tr;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004267 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004268
4269 /* return any leftover data */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004270 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4271 if (sret != -EBUSY)
4272 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004273
Steven Rostedtf9520752009-03-02 14:04:40 -05004274 trace_seq_init(&iter->seq);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004275
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004276 /* copy the tracer to avoid using a global lock all around */
Steven Rostedt107bad82008-05-12 21:21:01 +02004277 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004278 if (unlikely(iter->trace->name != tr->current_trace->name))
4279 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004280 mutex_unlock(&trace_types_lock);
4281
4282 /*
4283 * Avoid more than one consumer on a single file descriptor
4284	 * This is just a matter of trace coherency; the ring buffer itself
4285 * is protected.
4286 */
4287 mutex_lock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02004288 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004289 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4290 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02004291 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02004292 }
4293
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004294waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004295 sret = tracing_wait_pipe(filp);
4296 if (sret <= 0)
4297 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004298
4299 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004300 if (trace_empty(iter)) {
4301 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02004302 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004303 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004304
4305 if (cnt >= PAGE_SIZE)
4306 cnt = PAGE_SIZE - 1;
4307
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004308 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004309 memset(&iter->seq, 0,
4310 sizeof(struct trace_iterator) -
4311 offsetof(struct trace_iterator, seq));
Andrew Vagined5467d2013-08-02 21:16:43 +04004312 cpumask_clear(iter->started);
Steven Rostedt4823ed72008-05-12 21:21:01 +02004313 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004314
Lai Jiangshan4f535962009-05-18 19:35:34 +08004315 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004316 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05004317 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004318 enum print_line_t ret;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004319 int len = iter->seq.len;
4320
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004321 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004322 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02004323 /* don't print partial lines */
4324 iter->seq.len = len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004325 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004326 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01004327 if (ret != TRACE_TYPE_NO_CONSUME)
4328 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004329
4330 if (iter->seq.len >= cnt)
4331 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01004332
4333 /*
4334 * Setting the full flag means we reached the trace_seq buffer
4335		 * size and we should have left via the partial-output condition above.
4336 * One of the trace_seq_* functions is not used properly.
4337 */
4338 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4339 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004340 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004341 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004342 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02004343
Steven Rostedtb3806b42008-05-12 21:20:46 +02004344 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004345 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4346 if (iter->seq.readpos >= iter->seq.len)
Steven Rostedtf9520752009-03-02 14:04:40 -05004347 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004348
4349 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004350	 * If there was nothing to send to the user, in spite of consuming trace
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004351 * entries, go back to wait for more entries.
4352 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004353 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004354 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004355
Steven Rostedt107bad82008-05-12 21:21:01 +02004356out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004357 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02004358
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004359 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004360}
4361
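/*
 * Hedged usage sketch: this consuming reader typically backs the
 * 'trace_pipe' file; reading it removes entries from the ring buffer and
 * blocks until data is available:
 *
 *	cat /sys/kernel/debug/tracing/trace_pipe
 */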
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004362static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4363 unsigned int idx)
4364{
4365 __free_page(spd->pages[idx]);
4366}
4367
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08004368static const struct pipe_buf_operations tracing_pipe_buf_ops = {
Steven Rostedt34cd4992009-02-09 12:06:29 -05004369 .can_merge = 0,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004370 .confirm = generic_pipe_buf_confirm,
Al Viro92fdd982014-01-17 07:53:39 -05004371 .release = generic_pipe_buf_release,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004372 .steal = generic_pipe_buf_steal,
4373 .get = generic_pipe_buf_get,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004374};
4375
Steven Rostedt34cd4992009-02-09 12:06:29 -05004376static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01004377tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004378{
4379 size_t count;
4380 int ret;
4381
4382 /* Seq buffer is page-sized, exactly what we need. */
4383 for (;;) {
4384 count = iter->seq.len;
4385 ret = print_trace_line(iter);
4386 count = iter->seq.len - count;
4387 if (rem < count) {
4388 rem = 0;
4389 iter->seq.len -= count;
4390 break;
4391 }
4392 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4393 iter->seq.len -= count;
4394 break;
4395 }
4396
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08004397 if (ret != TRACE_TYPE_NO_CONSUME)
4398 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05004399 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05004400 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05004401 rem = 0;
4402 iter->ent = NULL;
4403 break;
4404 }
4405 }
4406
4407 return rem;
4408}
4409
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004410static ssize_t tracing_splice_read_pipe(struct file *filp,
4411 loff_t *ppos,
4412 struct pipe_inode_info *pipe,
4413 size_t len,
4414 unsigned int flags)
4415{
Jens Axboe35f3d142010-05-20 10:43:18 +02004416 struct page *pages_def[PIPE_DEF_BUFFERS];
4417 struct partial_page partial_def[PIPE_DEF_BUFFERS];
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004418 struct trace_iterator *iter = filp->private_data;
4419 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02004420 .pages = pages_def,
4421 .partial = partial_def,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004422 .nr_pages = 0, /* This gets updated below. */
Eric Dumazet047fe362012-06-12 15:24:40 +02004423 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004424 .flags = flags,
4425 .ops = &tracing_pipe_buf_ops,
4426 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004427 };
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004428 struct trace_array *tr = iter->tr;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004429 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004430 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004431 unsigned int i;
4432
Jens Axboe35f3d142010-05-20 10:43:18 +02004433 if (splice_grow_spd(pipe, &spd))
4434 return -ENOMEM;
4435
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004436 /* copy the tracer to avoid using a global lock all around */
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004437 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004438 if (unlikely(iter->trace->name != tr->current_trace->name))
4439 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004440 mutex_unlock(&trace_types_lock);
4441
4442 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004443
4444 if (iter->trace->splice_read) {
4445 ret = iter->trace->splice_read(iter, filp,
4446 ppos, pipe, len, flags);
4447 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004448 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004449 }
4450
4451 ret = tracing_wait_pipe(filp);
4452 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004453 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004454
Jason Wessel955b61e2010-08-05 09:22:23 -05004455 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004456 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004457 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004458 }
4459
Lai Jiangshan4f535962009-05-18 19:35:34 +08004460 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004461 trace_access_lock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004462
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004463 /* Fill as many pages as possible. */
Al Viroa786c062014-04-11 12:01:03 -04004464 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
Jens Axboe35f3d142010-05-20 10:43:18 +02004465 spd.pages[i] = alloc_page(GFP_KERNEL);
4466 if (!spd.pages[i])
Steven Rostedt34cd4992009-02-09 12:06:29 -05004467 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004468
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01004469 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004470
4471 /* Copy the data into the page, so we can start over. */
4472 ret = trace_seq_to_buffer(&iter->seq,
Jens Axboe35f3d142010-05-20 10:43:18 +02004473 page_address(spd.pages[i]),
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004474 iter->seq.len);
4475 if (ret < 0) {
Jens Axboe35f3d142010-05-20 10:43:18 +02004476 __free_page(spd.pages[i]);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004477 break;
4478 }
Jens Axboe35f3d142010-05-20 10:43:18 +02004479 spd.partial[i].offset = 0;
4480 spd.partial[i].len = iter->seq.len;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004481
Steven Rostedtf9520752009-03-02 14:04:40 -05004482 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004483 }
4484
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004485 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004486 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004487 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004488
4489 spd.nr_pages = i;
4490
Jens Axboe35f3d142010-05-20 10:43:18 +02004491 ret = splice_to_pipe(pipe, &spd);
4492out:
Eric Dumazet047fe362012-06-12 15:24:40 +02004493 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02004494 return ret;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004495
Steven Rostedt34cd4992009-02-09 12:06:29 -05004496out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004497 mutex_unlock(&iter->mutex);
Jens Axboe35f3d142010-05-20 10:43:18 +02004498 goto out;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004499}
4500
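/*
 * Hedged userspace sketch: the splice path above lets trace data move to
 * another file descriptor without an extra copy through userspace. The file
 * names and the bare-bones error handling below are illustrative only:
 *
 *	int in  = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);
 *	int out = open("trace.out", O_WRONLY | O_CREAT, 0644);
 *	int p[2];
 *
 *	pipe(p);
 *	for (;;) {
 *		ssize_t n = splice(in, NULL, p[1], NULL, 4096, 0);
 *		if (n <= 0)
 *			break;
 *		splice(p[0], NULL, out, NULL, n, 0);
 *	}
 */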
Steven Rostedta98a3c32008-05-12 21:20:59 +02004501static ssize_t
4502tracing_entries_read(struct file *filp, char __user *ubuf,
4503 size_t cnt, loff_t *ppos)
4504{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004505 struct inode *inode = file_inode(filp);
4506 struct trace_array *tr = inode->i_private;
4507 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004508 char buf[64];
4509 int r = 0;
4510 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004511
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004512 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004513
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004514 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004515 int cpu, buf_size_same;
4516 unsigned long size;
4517
4518 size = 0;
4519 buf_size_same = 1;
4520		/* check if all cpu sizes are the same */
4521 for_each_tracing_cpu(cpu) {
4522 /* fill in the size from first enabled cpu */
4523 if (size == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004524 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4525 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004526 buf_size_same = 0;
4527 break;
4528 }
4529 }
4530
4531 if (buf_size_same) {
4532 if (!ring_buffer_expanded)
4533 r = sprintf(buf, "%lu (expanded: %lu)\n",
4534 size >> 10,
4535 trace_buf_size >> 10);
4536 else
4537 r = sprintf(buf, "%lu\n", size >> 10);
4538 } else
4539 r = sprintf(buf, "X\n");
4540 } else
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004541 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004542
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004543 mutex_unlock(&trace_types_lock);
4544
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004545 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4546 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004547}
4548
4549static ssize_t
4550tracing_entries_write(struct file *filp, const char __user *ubuf,
4551 size_t cnt, loff_t *ppos)
4552{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004553 struct inode *inode = file_inode(filp);
4554 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004555 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004556 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004557
Peter Huewe22fe9b52011-06-07 21:58:27 +02004558 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4559 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004560 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004561
4562 /* must have at least 1 entry */
4563 if (!val)
4564 return -EINVAL;
4565
Steven Rostedt1696b2b2008-11-13 00:09:35 -05004566 /* value is in KB */
4567 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004568 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004569 if (ret < 0)
4570 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004571
Jiri Olsacf8517c2009-10-23 19:36:16 -04004572 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004573
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004574 return cnt;
4575}
Steven Rostedtbf5e6512008-11-10 21:46:00 -05004576
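/*
 * Hedged usage sketch: these read/write handlers typically back
 * 'buffer_size_kb' (globally, and per cpu under per_cpu/cpuN/). Values are
 * in KB, so growing every CPU's buffer to 4 MB would look like:
 *
 *	echo 4096 > /sys/kernel/debug/tracing/buffer_size_kb
 */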
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004577static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004578tracing_total_entries_read(struct file *filp, char __user *ubuf,
4579 size_t cnt, loff_t *ppos)
4580{
4581 struct trace_array *tr = filp->private_data;
4582 char buf[64];
4583 int r, cpu;
4584 unsigned long size = 0, expanded_size = 0;
4585
4586 mutex_lock(&trace_types_lock);
4587 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004588 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004589 if (!ring_buffer_expanded)
4590 expanded_size += trace_buf_size >> 10;
4591 }
4592 if (ring_buffer_expanded)
4593 r = sprintf(buf, "%lu\n", size);
4594 else
4595 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
4596 mutex_unlock(&trace_types_lock);
4597
4598 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4599}
4600
4601static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004602tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
4603 size_t cnt, loff_t *ppos)
4604{
4605 /*
4606	 * There is no need to read what the user has written; this function
4607	 * just makes sure that there is no error when "echo" is used
4608 */
4609
4610 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004611
4612 return cnt;
4613}
4614
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004615static int
4616tracing_free_buffer_release(struct inode *inode, struct file *filp)
4617{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004618 struct trace_array *tr = inode->i_private;
4619
Steven Rostedtcf30cf62011-06-14 22:44:07 -04004620	/* disable tracing? */
4621 if (trace_flags & TRACE_ITER_STOP_ON_FREE)
Alexander Z Lam711e1242013-08-02 18:36:15 -07004622 tracer_tracing_off(tr);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004623 /* resize the ring buffer to 0 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004624 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004625
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004626 trace_array_put(tr);
4627
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004628 return 0;
4629}
4630
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004631static ssize_t
4632tracing_mark_write(struct file *filp, const char __user *ubuf,
4633 size_t cnt, loff_t *fpos)
4634{
Steven Rostedtd696b582011-09-22 11:50:27 -04004635 unsigned long addr = (unsigned long)ubuf;
Alexander Z Lam2d716192013-07-01 15:31:24 -07004636 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04004637 struct ring_buffer_event *event;
4638 struct ring_buffer *buffer;
4639 struct print_entry *entry;
4640 unsigned long irq_flags;
4641 struct page *pages[2];
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004642 void *map_page[2];
Steven Rostedtd696b582011-09-22 11:50:27 -04004643 int nr_pages = 1;
4644 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04004645 int offset;
4646 int size;
4647 int len;
4648 int ret;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004649 int i;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004650
Steven Rostedtc76f0692008-11-07 22:36:02 -05004651 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004652 return -EINVAL;
4653
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07004654 if (!(trace_flags & TRACE_ITER_MARKERS))
4655 return -EINVAL;
4656
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004657 if (cnt > TRACE_BUF_SIZE)
4658 cnt = TRACE_BUF_SIZE;
4659
Steven Rostedtd696b582011-09-22 11:50:27 -04004660 /*
4661 * Userspace is injecting traces into the kernel trace buffer.
4662	 * We want to be as non-intrusive as possible.
4663 * To do so, we do not want to allocate any special buffers
4664 * or take any locks, but instead write the userspace data
4665 * straight into the ring buffer.
4666 *
4667 * First we need to pin the userspace buffer into memory,
4668	 * which it most likely already is, because the process just
4669	 * referenced it. But there's no guarantee that it is. By using get_user_pages_fast()
4670 * and kmap_atomic/kunmap_atomic() we can get access to the
4671 * pages directly. We then write the data directly into the
4672 * ring buffer.
4673 */
4674 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004675
Steven Rostedtd696b582011-09-22 11:50:27 -04004676 /* check if we cross pages */
4677 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
4678 nr_pages = 2;
4679
4680 offset = addr & (PAGE_SIZE - 1);
4681 addr &= PAGE_MASK;
4682
4683 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
4684 if (ret < nr_pages) {
4685 while (--ret >= 0)
4686 put_page(pages[ret]);
4687 written = -EFAULT;
4688 goto out;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004689 }
4690
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004691 for (i = 0; i < nr_pages; i++)
4692 map_page[i] = kmap_atomic(pages[i]);
Steven Rostedtd696b582011-09-22 11:50:27 -04004693
4694 local_save_flags(irq_flags);
4695 size = sizeof(*entry) + cnt + 2; /* possible \n added */
Alexander Z Lam2d716192013-07-01 15:31:24 -07004696 buffer = tr->trace_buffer.buffer;
Steven Rostedtd696b582011-09-22 11:50:27 -04004697 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
4698 irq_flags, preempt_count());
4699 if (!event) {
4700 /* Ring buffer disabled, return as if not open for write */
4701 written = -EBADF;
4702 goto out_unlock;
4703 }
4704
4705 entry = ring_buffer_event_data(event);
4706 entry->ip = _THIS_IP_;
4707
4708 if (nr_pages == 2) {
4709 len = PAGE_SIZE - offset;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004710 memcpy(&entry->buf, map_page[0] + offset, len);
4711 memcpy(&entry->buf[len], map_page[1], cnt - len);
Steven Rostedtd696b582011-09-22 11:50:27 -04004712 } else
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004713 memcpy(&entry->buf, map_page[0] + offset, cnt);
Steven Rostedtd696b582011-09-22 11:50:27 -04004714
4715 if (entry->buf[cnt - 1] != '\n') {
4716 entry->buf[cnt] = '\n';
4717 entry->buf[cnt + 1] = '\0';
4718 } else
4719 entry->buf[cnt] = '\0';
4720
Steven Rostedt7ffbd482012-10-11 12:14:25 -04004721 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04004722
4723 written = cnt;
4724
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02004725 *fpos += written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004726
Steven Rostedtd696b582011-09-22 11:50:27 -04004727 out_unlock:
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004728 for (i = 0; i < nr_pages; i++){
4729 kunmap_atomic(map_page[i]);
4730 put_page(pages[i]);
4731 }
Steven Rostedtd696b582011-09-22 11:50:27 -04004732 out:
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02004733 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004734}
4735
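/*
 * Hedged usage sketch: this handler typically backs the 'trace_marker'
 * file; a newline is appended if missing, and writes are capped at
 * TRACE_BUF_SIZE:
 *
 *	echo "hit checkpoint A" > /sys/kernel/debug/tracing/trace_marker
 */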
Li Zefan13f16d22009-12-08 11:16:11 +08004736static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08004737{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004738 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08004739 int i;
4740
4741 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08004742 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08004743 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004744 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
4745 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08004746 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08004747
Li Zefan13f16d22009-12-08 11:16:11 +08004748 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08004749}
4750
Steven Rostedte1e232c2014-02-10 23:38:46 -05004751static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
Zhaolei5079f322009-08-25 16:12:56 +08004752{
Zhaolei5079f322009-08-25 16:12:56 +08004753 int i;
4754
Zhaolei5079f322009-08-25 16:12:56 +08004755 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
4756 if (strcmp(trace_clocks[i].name, clockstr) == 0)
4757 break;
4758 }
4759 if (i == ARRAY_SIZE(trace_clocks))
4760 return -EINVAL;
4761
Zhaolei5079f322009-08-25 16:12:56 +08004762 mutex_lock(&trace_types_lock);
4763
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004764 tr->clock_id = i;
4765
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004766 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08004767
David Sharp60303ed2012-10-11 16:27:52 -07004768 /*
4769	 * The new clock may not be consistent with the previous clock.
4770 * Reset the buffer so that it doesn't have incomparable timestamps.
4771 */
Alexander Z Lam94571582013-08-02 18:36:16 -07004772 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004773
4774#ifdef CONFIG_TRACER_MAX_TRACE
4775 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
4776 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
Alexander Z Lam94571582013-08-02 18:36:16 -07004777 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004778#endif
David Sharp60303ed2012-10-11 16:27:52 -07004779
Zhaolei5079f322009-08-25 16:12:56 +08004780 mutex_unlock(&trace_types_lock);
4781
Steven Rostedte1e232c2014-02-10 23:38:46 -05004782 return 0;
4783}
4784
4785static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
4786 size_t cnt, loff_t *fpos)
4787{
4788 struct seq_file *m = filp->private_data;
4789 struct trace_array *tr = m->private;
4790 char buf[64];
4791 const char *clockstr;
4792 int ret;
4793
4794 if (cnt >= sizeof(buf))
4795 return -EINVAL;
4796
4797 if (copy_from_user(&buf, ubuf, cnt))
4798 return -EFAULT;
4799
4800 buf[cnt] = 0;
4801
4802 clockstr = strstrip(buf);
4803
4804 ret = tracing_set_clock(tr, clockstr);
4805 if (ret)
4806 return ret;
4807
Zhaolei5079f322009-08-25 16:12:56 +08004808 *fpos += cnt;
4809
4810 return cnt;
4811}
4812
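/*
 * Hedged usage sketch: the 'trace_clock' file shows the available clocks
 * with the current one in brackets; writing a name from that list, e.g.
 * "global", switches clocks (and resets the buffer, as noted above):
 *
 *	cat /sys/kernel/debug/tracing/trace_clock
 *	echo global > /sys/kernel/debug/tracing/trace_clock
 */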
Li Zefan13f16d22009-12-08 11:16:11 +08004813static int tracing_clock_open(struct inode *inode, struct file *file)
4814{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004815 struct trace_array *tr = inode->i_private;
4816 int ret;
4817
Li Zefan13f16d22009-12-08 11:16:11 +08004818 if (tracing_disabled)
4819 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004820
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004821 if (trace_array_get(tr))
4822 return -ENODEV;
4823
4824 ret = single_open(file, tracing_clock_show, inode->i_private);
4825 if (ret < 0)
4826 trace_array_put(tr);
4827
4828 return ret;
Li Zefan13f16d22009-12-08 11:16:11 +08004829}
4830
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05004831struct ftrace_buffer_info {
4832 struct trace_iterator iter;
4833 void *spare;
4834 unsigned int read;
4835};
4836
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004837#ifdef CONFIG_TRACER_SNAPSHOT
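/*
 * Open handler for the "snapshot" file.  Readers get a full trace
 * iterator over the max (snapshot) buffer via __tracing_open(); writers
 * only get a stub seq_file whose private data carries the iterator.
 */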
4838static int tracing_snapshot_open(struct inode *inode, struct file *file)
4839{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004840 struct trace_array *tr = inode->i_private;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004841 struct trace_iterator *iter;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004842 struct seq_file *m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004843 int ret = 0;
4844
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004845 if (trace_array_get(tr) < 0)
4846 return -ENODEV;
4847
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004848 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02004849 iter = __tracing_open(inode, file, true);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004850 if (IS_ERR(iter))
4851 ret = PTR_ERR(iter);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004852 } else {
4853 /* Writes still need the seq_file to hold the private data */
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004854 ret = -ENOMEM;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004855 m = kzalloc(sizeof(*m), GFP_KERNEL);
4856 if (!m)
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004857 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004858 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4859 if (!iter) {
4860 kfree(m);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004861 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004862 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004863 ret = 0;
4864
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004865 iter->tr = tr;
Oleg Nesterov6484c712013-07-23 17:26:10 +02004866 iter->trace_buffer = &tr->max_buffer;
4867 iter->cpu_file = tracing_get_cpu(inode);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004868 m->private = iter;
4869 file->private_data = m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004870 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004871out:
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004872 if (ret < 0)
4873 trace_array_put(tr);
4874
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004875 return ret;
4876}
4877
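/*
 * Write handler for "snapshot".  The value written selects the action:
 *   0 - free the snapshot buffer (only valid for the whole buffer)
 *   1 - allocate the snapshot buffer if needed and swap it with the live
 *       buffer (per-cpu swap needs CONFIG_RING_BUFFER_ALLOW_SWAP)
 *   other - clear the snapshot buffer's contents
 * e.g.  echo 1 > /sys/kernel/debug/tracing/snapshot
 */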
4878static ssize_t
4879tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
4880 loff_t *ppos)
4881{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004882 struct seq_file *m = filp->private_data;
4883 struct trace_iterator *iter = m->private;
4884 struct trace_array *tr = iter->tr;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004885 unsigned long val;
4886 int ret;
4887
4888 ret = tracing_update_buffers();
4889 if (ret < 0)
4890 return ret;
4891
4892 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4893 if (ret)
4894 return ret;
4895
4896 mutex_lock(&trace_types_lock);
4897
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004898 if (tr->current_trace->use_max_tr) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004899 ret = -EBUSY;
4900 goto out;
4901 }
4902
4903 switch (val) {
4904 case 0:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004905 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4906 ret = -EINVAL;
4907 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004908 }
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004909 if (tr->allocated_snapshot)
4910 free_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004911 break;
4912 case 1:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004913/* Only allow per-cpu swap if the ring buffer supports it */
4914#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
4915 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4916 ret = -EINVAL;
4917 break;
4918 }
4919#endif
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05004920 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004921 ret = alloc_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004922 if (ret < 0)
4923 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004924 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004925 local_irq_disable();
4926 /* Now, we're going to swap */
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004927 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05004928 update_max_tr(tr, current, smp_processor_id());
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004929 else
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05004930 update_max_tr_single(tr, current, iter->cpu_file);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004931 local_irq_enable();
4932 break;
4933 default:
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05004934 if (tr->allocated_snapshot) {
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004935 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4936 tracing_reset_online_cpus(&tr->max_buffer);
4937 else
4938 tracing_reset(&tr->max_buffer, iter->cpu_file);
4939 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004940 break;
4941 }
4942
4943 if (ret >= 0) {
4944 *ppos += cnt;
4945 ret = cnt;
4946 }
4947out:
4948 mutex_unlock(&trace_types_lock);
4949 return ret;
4950}
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004951
4952static int tracing_snapshot_release(struct inode *inode, struct file *file)
4953{
4954 struct seq_file *m = file->private_data;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004955 int ret;
4956
4957 ret = tracing_release(inode, file);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004958
4959 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004960 return ret;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004961
4962 /* If write only, the seq_file is just a stub */
4963 if (m)
4964 kfree(m->private);
4965 kfree(m);
4966
4967 return 0;
4968}
4969
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05004970static int tracing_buffers_open(struct inode *inode, struct file *filp);
4971static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
4972 size_t count, loff_t *ppos);
4973static int tracing_buffers_release(struct inode *inode, struct file *file);
4974static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
4975 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
4976
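/*
 * Open handler for "snapshot_raw": reuse tracing_buffers_open(), but
 * point the iterator at the max (snapshot) buffer so that raw pages are
 * read from the snapshot instead of the live trace buffer.
 */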
4977static int snapshot_raw_open(struct inode *inode, struct file *filp)
4978{
4979 struct ftrace_buffer_info *info;
4980 int ret;
4981
4982 ret = tracing_buffers_open(inode, filp);
4983 if (ret < 0)
4984 return ret;
4985
4986 info = filp->private_data;
4987
4988 if (info->iter.trace->use_max_tr) {
4989 tracing_buffers_release(inode, filp);
4990 return -EBUSY;
4991 }
4992
4993 info->iter.snapshot = true;
4994 info->iter.trace_buffer = &info->iter.tr->max_buffer;
4995
4996 return ret;
4997}
4998
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004999#endif /* CONFIG_TRACER_SNAPSHOT */
5000
5001
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005002static const struct file_operations tracing_max_lat_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005003 .open = tracing_open_generic,
5004 .read = tracing_max_lat_read,
5005 .write = tracing_max_lat_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005006 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005007};
5008
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005009static const struct file_operations set_tracer_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005010 .open = tracing_open_generic,
5011 .read = tracing_set_trace_read,
5012 .write = tracing_set_trace_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005013 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005014};
5015
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005016static const struct file_operations tracing_pipe_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005017 .open = tracing_open_pipe,
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005018 .poll = tracing_poll_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005019 .read = tracing_read_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005020 .splice_read = tracing_splice_read_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005021 .release = tracing_release_pipe,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005022 .llseek = no_llseek,
Steven Rostedtb3806b42008-05-12 21:20:46 +02005023};
5024
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005025static const struct file_operations tracing_entries_fops = {
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005026 .open = tracing_open_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02005027 .read = tracing_entries_read,
5028 .write = tracing_entries_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005029 .llseek = generic_file_llseek,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005030 .release = tracing_release_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02005031};
5032
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005033static const struct file_operations tracing_total_entries_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005034 .open = tracing_open_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005035 .read = tracing_total_entries_read,
5036 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005037 .release = tracing_release_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005038};
5039
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005040static const struct file_operations tracing_free_buffer_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005041 .open = tracing_open_generic_tr,
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005042 .write = tracing_free_buffer_write,
5043 .release = tracing_free_buffer_release,
5044};
5045
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005046static const struct file_operations tracing_mark_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005047 .open = tracing_open_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005048 .write = tracing_mark_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005049 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005050 .release = tracing_release_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005051};
5052
Zhaolei5079f322009-08-25 16:12:56 +08005053static const struct file_operations trace_clock_fops = {
Li Zefan13f16d22009-12-08 11:16:11 +08005054 .open = tracing_clock_open,
5055 .read = seq_read,
5056 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005057 .release = tracing_single_release_tr,
Zhaolei5079f322009-08-25 16:12:56 +08005058 .write = tracing_clock_write,
5059};
5060
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005061#ifdef CONFIG_TRACER_SNAPSHOT
5062static const struct file_operations snapshot_fops = {
5063 .open = tracing_snapshot_open,
5064 .read = seq_read,
5065 .write = tracing_snapshot_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05005066 .llseek = tracing_lseek,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005067 .release = tracing_snapshot_release,
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005068};
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005069
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005070static const struct file_operations snapshot_raw_fops = {
5071 .open = snapshot_raw_open,
5072 .read = tracing_buffers_read,
5073 .release = tracing_buffers_release,
5074 .splice_read = tracing_buffers_splice_read,
5075 .llseek = no_llseek,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005076};
5077
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005078#endif /* CONFIG_TRACER_SNAPSHOT */
5079
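/*
 * Open handler for the per-cpu "trace_pipe_raw" file.  Allocates the
 * ftrace_buffer_info that subsequent reads use to pull whole ring-buffer
 * pages for one CPU out to user space.
 */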
Steven Rostedt2cadf912008-12-01 22:20:19 -05005080static int tracing_buffers_open(struct inode *inode, struct file *filp)
5081{
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005082 struct trace_array *tr = inode->i_private;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005083 struct ftrace_buffer_info *info;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005084 int ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005085
5086 if (tracing_disabled)
5087 return -ENODEV;
5088
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005089 if (trace_array_get(tr) < 0)
5090 return -ENODEV;
5091
Steven Rostedt2cadf912008-12-01 22:20:19 -05005092 info = kzalloc(sizeof(*info), GFP_KERNEL);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005093 if (!info) {
5094 trace_array_put(tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005095 return -ENOMEM;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005096 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05005097
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005098 mutex_lock(&trace_types_lock);
5099
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005100 info->iter.tr = tr;
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005101 info->iter.cpu_file = tracing_get_cpu(inode);
Steven Rostedtb6273442013-02-28 13:44:11 -05005102 info->iter.trace = tr->current_trace;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005103 info->iter.trace_buffer = &tr->trace_buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005104 info->spare = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005105 /* Force reading ring buffer for first read */
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005106 info->read = (unsigned int)-1;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005107
5108 filp->private_data = info;
5109
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005110 mutex_unlock(&trace_types_lock);
5111
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005112 ret = nonseekable_open(inode, filp);
5113 if (ret < 0)
5114 trace_array_put(tr);
5115
5116 return ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005117}
5118
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005119static unsigned int
5120tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5121{
5122 struct ftrace_buffer_info *info = filp->private_data;
5123 struct trace_iterator *iter = &info->iter;
5124
5125 return trace_poll(iter, filp, poll_table);
5126}
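
/*
 * Read whole pages from the ring buffer into info->spare and copy them
 * out to user space.  Blocks (unless O_NONBLOCK is set) while the buffer
 * is empty, and keeps partial-page progress in info->read across reads.
 */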
5127
Steven Rostedt2cadf912008-12-01 22:20:19 -05005128static ssize_t
5129tracing_buffers_read(struct file *filp, char __user *ubuf,
5130 size_t count, loff_t *ppos)
5131{
5132 struct ftrace_buffer_info *info = filp->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005133 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005134 ssize_t ret;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005135 ssize_t size;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005136
Steven Rostedt2dc5d122009-03-04 19:10:05 -05005137 if (!count)
5138 return 0;
5139
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005140 mutex_lock(&trace_types_lock);
5141
5142#ifdef CONFIG_TRACER_MAX_TRACE
5143 if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5144 size = -EBUSY;
5145 goto out_unlock;
5146 }
5147#endif
5148
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005149 if (!info->spare)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005150 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5151 iter->cpu_file);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005152 size = -ENOMEM;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005153 if (!info->spare)
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005154 goto out_unlock;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005155
Steven Rostedt2cadf912008-12-01 22:20:19 -05005156 /* Do we have previous read data to read? */
5157 if (info->read < PAGE_SIZE)
5158 goto read;
5159
Steven Rostedtb6273442013-02-28 13:44:11 -05005160 again:
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005161 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005162 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005163 &info->spare,
5164 count,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005165 iter->cpu_file, 0);
5166 trace_access_unlock(iter->cpu_file);
Steven Rostedtb6273442013-02-28 13:44:11 -05005167
5168 if (ret < 0) {
5169 if (trace_empty(iter)) {
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005170 if ((filp->f_flags & O_NONBLOCK)) {
5171 size = -EAGAIN;
5172 goto out_unlock;
5173 }
5174 mutex_unlock(&trace_types_lock);
Steven Rostedt (Red Hat)b1169cc2014-04-29 17:54:37 -04005175 wait_on_pipe(iter);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005176 mutex_lock(&trace_types_lock);
5177 if (signal_pending(current)) {
5178 size = -EINTR;
5179 goto out_unlock;
5180 }
Steven Rostedtb6273442013-02-28 13:44:11 -05005181 goto again;
5182 }
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005183 size = 0;
5184 goto out_unlock;
Steven Rostedtb6273442013-02-28 13:44:11 -05005185 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05005186
Steven Rostedt436fc282011-10-14 10:44:25 -04005187 info->read = 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05005188 read:
Steven Rostedt2cadf912008-12-01 22:20:19 -05005189 size = PAGE_SIZE - info->read;
5190 if (size > count)
5191 size = count;
5192
5193 ret = copy_to_user(ubuf, info->spare + info->read, size);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005194 if (ret == size) {
5195 size = -EFAULT;
5196 goto out_unlock;
5197 }
Steven Rostedt2dc5d122009-03-04 19:10:05 -05005198 size -= ret;
5199
Steven Rostedt2cadf912008-12-01 22:20:19 -05005200 *ppos += size;
5201 info->read += size;
5202
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005203 out_unlock:
5204 mutex_unlock(&trace_types_lock);
5205
Steven Rostedt2cadf912008-12-01 22:20:19 -05005206 return size;
5207}
5208
5209static int tracing_buffers_release(struct inode *inode, struct file *file)
5210{
5211 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005212 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005213
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005214 mutex_lock(&trace_types_lock);
5215
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005216 __trace_array_put(iter->tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005217
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005218 if (info->spare)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005219 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005220 kfree(info);
5221
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005222 mutex_unlock(&trace_types_lock);
5223
Steven Rostedt2cadf912008-12-01 22:20:19 -05005224 return 0;
5225}
5226
5227struct buffer_ref {
5228 struct ring_buffer *buffer;
5229 void *page;
5230 int ref;
5231};
5232
5233static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5234 struct pipe_buffer *buf)
5235{
5236 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5237
5238 if (--ref->ref)
5239 return;
5240
5241 ring_buffer_free_read_page(ref->buffer, ref->page);
5242 kfree(ref);
5243 buf->private = 0;
5244}
5245
Steven Rostedt2cadf912008-12-01 22:20:19 -05005246static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5247 struct pipe_buffer *buf)
5248{
5249 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5250
5251 ref->ref++;
5252}
5253
5254/* Pipe buffer operations for a buffer. */
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08005255static const struct pipe_buf_operations buffer_pipe_buf_ops = {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005256 .can_merge = 0,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005257 .confirm = generic_pipe_buf_confirm,
5258 .release = buffer_pipe_buf_release,
Masami Hiramatsud55cb6c2012-08-09 21:31:10 +09005259 .steal = generic_pipe_buf_steal,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005260 .get = buffer_pipe_buf_get,
5261};
5262
5263/*
5264 * Callback from splice_to_pipe(): release the pages still held in the
5265 * spd because we errored out while filling the pipe.
5266 */
5267static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5268{
5269 struct buffer_ref *ref =
5270 (struct buffer_ref *)spd->partial[i].private;
5271
5272 if (--ref->ref)
5273 return;
5274
5275 ring_buffer_free_read_page(ref->buffer, ref->page);
5276 kfree(ref);
5277 spd->partial[i].private = 0;
5278}
5279
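/*
 * Splice whole ring-buffer pages into a pipe without copying: each page
 * read with ring_buffer_read_page() is wrapped in a buffer_ref and handed
 * to splice_to_pipe().  The file offset must be page aligned, and the
 * length is rounded down to whole pages.
 */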
5280static ssize_t
5281tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5282 struct pipe_inode_info *pipe, size_t len,
5283 unsigned int flags)
5284{
5285 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005286 struct trace_iterator *iter = &info->iter;
Jens Axboe35f3d142010-05-20 10:43:18 +02005287 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5288 struct page *pages_def[PIPE_DEF_BUFFERS];
Steven Rostedt2cadf912008-12-01 22:20:19 -05005289 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02005290 .pages = pages_def,
5291 .partial = partial_def,
Eric Dumazet047fe362012-06-12 15:24:40 +02005292 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005293 .flags = flags,
5294 .ops = &buffer_pipe_buf_ops,
5295 .spd_release = buffer_spd_release,
5296 };
5297 struct buffer_ref *ref;
Steven Rostedt93459c62009-04-29 00:23:13 -04005298 int entries, size, i;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005299 ssize_t ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005300
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005301 mutex_lock(&trace_types_lock);
5302
5303#ifdef CONFIG_TRACER_MAX_TRACE
5304 if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5305 ret = -EBUSY;
5306 goto out;
5307 }
5308#endif
5309
5310 if (splice_grow_spd(pipe, &spd)) {
5311 ret = -ENOMEM;
5312 goto out;
5313 }
Jens Axboe35f3d142010-05-20 10:43:18 +02005314
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005315 if (*ppos & (PAGE_SIZE - 1)) {
Jens Axboe35f3d142010-05-20 10:43:18 +02005316 ret = -EINVAL;
5317 goto out;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005318 }
5319
5320 if (len & (PAGE_SIZE - 1)) {
Jens Axboe35f3d142010-05-20 10:43:18 +02005321 if (len < PAGE_SIZE) {
5322 ret = -EINVAL;
5323 goto out;
5324 }
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005325 len &= PAGE_MASK;
5326 }
5327
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005328 again:
5329 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005330 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt93459c62009-04-29 00:23:13 -04005331
Al Viroa786c062014-04-11 12:01:03 -04005332 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005333 struct page *page;
5334 int r;
5335
5336 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
5337 if (!ref)
5338 break;
5339
Steven Rostedt7267fa62009-04-29 00:16:21 -04005340 ref->ref = 1;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005341 ref->buffer = iter->trace_buffer->buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005342 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005343 if (!ref->page) {
5344 kfree(ref);
5345 break;
5346 }
5347
5348 r = ring_buffer_read_page(ref->buffer, &ref->page,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005349 len, iter->cpu_file, 1);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005350 if (r < 0) {
Vaibhav Nagarnaik7ea59062011-05-03 17:56:42 -07005351 ring_buffer_free_read_page(ref->buffer, ref->page);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005352 kfree(ref);
5353 break;
5354 }
5355
5356 /*
5357		 * Zero out any leftover data; this page is going to
5358		 * user land.
5359 */
5360 size = ring_buffer_page_len(ref->page);
5361 if (size < PAGE_SIZE)
5362 memset(ref->page + size, 0, PAGE_SIZE - size);
5363
5364 page = virt_to_page(ref->page);
5365
5366 spd.pages[i] = page;
5367 spd.partial[i].len = PAGE_SIZE;
5368 spd.partial[i].offset = 0;
5369 spd.partial[i].private = (unsigned long)ref;
5370 spd.nr_pages++;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005371 *ppos += PAGE_SIZE;
Steven Rostedt93459c62009-04-29 00:23:13 -04005372
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005373 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005374 }
5375
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005376 trace_access_unlock(iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005377 spd.nr_pages = i;
5378
5379 /* did we read anything? */
5380 if (!spd.nr_pages) {
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005381 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005382 ret = -EAGAIN;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005383 goto out;
5384 }
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005385 mutex_unlock(&trace_types_lock);
Steven Rostedt (Red Hat)b1169cc2014-04-29 17:54:37 -04005386 wait_on_pipe(iter);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005387 mutex_lock(&trace_types_lock);
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005388 if (signal_pending(current)) {
5389 ret = -EINTR;
5390 goto out;
5391 }
5392 goto again;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005393 }
5394
5395 ret = splice_to_pipe(pipe, &spd);
Eric Dumazet047fe362012-06-12 15:24:40 +02005396 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02005397out:
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005398 mutex_unlock(&trace_types_lock);
5399
Steven Rostedt2cadf912008-12-01 22:20:19 -05005400 return ret;
5401}
5402
5403static const struct file_operations tracing_buffers_fops = {
5404 .open = tracing_buffers_open,
5405 .read = tracing_buffers_read,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005406 .poll = tracing_buffers_poll,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005407 .release = tracing_buffers_release,
5408 .splice_read = tracing_buffers_splice_read,
5409 .llseek = no_llseek,
5410};
5411
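/*
 * Per-cpu "stats" file: format the entry, overrun and byte counts (plus
 * the oldest/current timestamps when the trace clock is in nanoseconds)
 * into a trace_seq and copy the text out to user space.
 */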
Steven Rostedtc8d77182009-04-29 18:03:45 -04005412static ssize_t
5413tracing_stats_read(struct file *filp, char __user *ubuf,
5414 size_t count, loff_t *ppos)
5415{
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005416 struct inode *inode = file_inode(filp);
5417 struct trace_array *tr = inode->i_private;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005418 struct trace_buffer *trace_buf = &tr->trace_buffer;
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005419 int cpu = tracing_get_cpu(inode);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005420 struct trace_seq *s;
5421 unsigned long cnt;
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005422 unsigned long long t;
5423 unsigned long usec_rem;
Steven Rostedtc8d77182009-04-29 18:03:45 -04005424
Li Zefane4f2d102009-06-15 10:57:28 +08005425 s = kmalloc(sizeof(*s), GFP_KERNEL);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005426 if (!s)
Roel Kluina6463652009-11-11 22:26:35 +01005427 return -ENOMEM;
Steven Rostedtc8d77182009-04-29 18:03:45 -04005428
5429 trace_seq_init(s);
5430
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005431 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005432 trace_seq_printf(s, "entries: %ld\n", cnt);
5433
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005434 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005435 trace_seq_printf(s, "overrun: %ld\n", cnt);
5436
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005437 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005438 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5439
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005440 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005441 trace_seq_printf(s, "bytes: %ld\n", cnt);
5442
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09005443 if (trace_clocks[tr->clock_id].in_ns) {
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005444 /* local or global for trace_clock */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005445 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005446 usec_rem = do_div(t, USEC_PER_SEC);
5447 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5448 t, usec_rem);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005449
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005450 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005451 usec_rem = do_div(t, USEC_PER_SEC);
5452 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5453 } else {
5454 /* counter or tsc mode for trace_clock */
5455 trace_seq_printf(s, "oldest event ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005456 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005457
5458 trace_seq_printf(s, "now ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005459 ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005460 }
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005461
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005462 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
Slava Pestov884bfe82011-07-15 14:23:58 -07005463 trace_seq_printf(s, "dropped events: %ld\n", cnt);
5464
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005465 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
Steven Rostedt (Red Hat)ad964702013-01-29 17:45:49 -05005466 trace_seq_printf(s, "read events: %ld\n", cnt);
5467
Steven Rostedtc8d77182009-04-29 18:03:45 -04005468 count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
5469
5470 kfree(s);
5471
5472 return count;
5473}
5474
5475static const struct file_operations tracing_stats_fops = {
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005476 .open = tracing_open_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04005477 .read = tracing_stats_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005478 .llseek = generic_file_llseek,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005479 .release = tracing_release_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04005480};
5481
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005482#ifdef CONFIG_DYNAMIC_FTRACE
5483
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005484int __weak ftrace_arch_read_dyn_info(char *buf, int size)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005485{
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005486 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005487}
5488
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005489static ssize_t
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005490tracing_read_dyn_info(struct file *filp, char __user *ubuf,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005491 size_t cnt, loff_t *ppos)
5492{
Steven Rostedta26a2a22008-10-31 00:03:22 -04005493 static char ftrace_dyn_info_buffer[1024];
5494 static DEFINE_MUTEX(dyn_info_mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005495 unsigned long *p = filp->private_data;
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005496 char *buf = ftrace_dyn_info_buffer;
Steven Rostedta26a2a22008-10-31 00:03:22 -04005497 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005498 int r;
5499
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005500 mutex_lock(&dyn_info_mutex);
5501 r = sprintf(buf, "%ld ", *p);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005502
Steven Rostedta26a2a22008-10-31 00:03:22 -04005503 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005504 buf[r++] = '\n';
5505
5506 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5507
5508 mutex_unlock(&dyn_info_mutex);
5509
5510 return r;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005511}
5512
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005513static const struct file_operations tracing_dyn_info_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005514 .open = tracing_open_generic,
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005515 .read = tracing_read_dyn_info,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005516 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005517};
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005518#endif /* CONFIG_DYNAMIC_FTRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005519
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005520#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5521static void
5522ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005523{
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005524 tracing_snapshot();
5525}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005526
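/*
 * Probe callback for the counted form of the snapshot command: take a
 * snapshot and count down; a count of -1 (no count given) means take a
 * snapshot on every hit.
 */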
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005527static void
5528ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5529{
5530	unsigned long *count = (unsigned long *)data;
5531
5532 if (!*count)
5533 return;
5534
5535 if (*count != -1)
5536 (*count)--;
5537
5538 tracing_snapshot();
5539}
5540
5541static int
5542ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5543 struct ftrace_probe_ops *ops, void *data)
5544{
5545 long count = (long)data;
5546
5547 seq_printf(m, "%ps:", (void *)ip);
5548
5549	seq_puts(m, "snapshot");
5550
5551 if (count == -1)
5552		seq_puts(m, ":unlimited\n");
5553 else
5554 seq_printf(m, ":count=%ld\n", count);
5555
5556 return 0;
5557}
5558
5559static struct ftrace_probe_ops snapshot_probe_ops = {
5560 .func = ftrace_snapshot,
5561 .print = ftrace_snapshot_print,
5562};
5563
5564static struct ftrace_probe_ops snapshot_count_probe_ops = {
5565 .func = ftrace_count_snapshot,
5566 .print = ftrace_snapshot_print,
5567};
5568
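/*
 * Parse the "snapshot" function command written to set_ftrace_filter,
 * e.g.  echo 'do_fork:snapshot:5' > set_ftrace_filter
 * An optional ":count" limits how many times the probe fires, and a
 * leading '!' unregisters a previously registered probe.
 */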
5569static int
5570ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
5571 char *glob, char *cmd, char *param, int enable)
5572{
5573 struct ftrace_probe_ops *ops;
5574 void *count = (void *)-1;
5575 char *number;
5576 int ret;
5577
5578 /* hash funcs only work with set_ftrace_filter */
5579 if (!enable)
5580 return -EINVAL;
5581
5582 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
5583
5584 if (glob[0] == '!') {
5585 unregister_ftrace_function_probe_func(glob+1, ops);
5586 return 0;
5587 }
5588
5589 if (!param)
5590 goto out_reg;
5591
5592 number = strsep(&param, ":");
5593
5594 if (!strlen(number))
5595 goto out_reg;
5596
5597 /*
5598 * We use the callback data field (which is a pointer)
5599 * as our counter.
5600 */
5601 ret = kstrtoul(number, 0, (unsigned long *)&count);
5602 if (ret)
5603 return ret;
5604
5605 out_reg:
5606 ret = register_ftrace_function_probe(glob, ops, count);
5607
5608 if (ret >= 0)
5609 alloc_snapshot(&global_trace);
5610
5611 return ret < 0 ? ret : 0;
5612}
5613
5614static struct ftrace_func_command ftrace_snapshot_cmd = {
5615 .name = "snapshot",
5616 .func = ftrace_trace_snapshot_callback,
5617};
5618
Tom Zanussi38de93a2013-10-24 08:34:18 -05005619static __init int register_snapshot_cmd(void)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005620{
5621 return register_ftrace_command(&ftrace_snapshot_cmd);
5622}
5623#else
Tom Zanussi38de93a2013-10-24 08:34:18 -05005624static inline __init int register_snapshot_cmd(void) { return 0; }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005625#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005626
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005627struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005628{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005629 if (tr->dir)
5630 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005631
Frederic Weisbecker3e1f60b2009-03-22 23:10:45 +01005632 if (!debugfs_initialized())
5633 return NULL;
5634
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005635 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
5636 tr->dir = debugfs_create_dir("tracing", NULL);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005637
zhangwei(Jovi)687c8782013-03-11 15:13:29 +08005638 if (!tr->dir)
5639 pr_warn_once("Could not create debugfs directory 'tracing'\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005640
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005641 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005642}
5643
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005644struct dentry *tracing_init_dentry(void)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005645{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005646 return tracing_init_dentry_tr(&global_trace);
5647}
5648
5649static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
5650{
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005651 struct dentry *d_tracer;
5652
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005653 if (tr->percpu_dir)
5654 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005655
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005656 d_tracer = tracing_init_dentry_tr(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005657 if (!d_tracer)
5658 return NULL;
5659
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005660 tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005661
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005662 WARN_ONCE(!tr->percpu_dir,
5663 "Could not create debugfs directory 'per_cpu/%d'\n", cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005664
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005665 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005666}
5667
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005668static struct dentry *
5669trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
5670 void *data, long cpu, const struct file_operations *fops)
5671{
5672 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
5673
5674 if (ret) /* See tracing_get_cpu() */
5675 ret->d_inode->i_cdev = (void *)(cpu + 1);
5676 return ret;
5677}
5678
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005679static void
5680tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005681{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005682 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005683 struct dentry *d_cpu;
Steven Rostedtdd49a382010-10-20 21:51:26 -04005684 char cpu_dir[30]; /* 30 characters should be more than enough */
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005685
Namhyung Kim0a3d7ce2012-04-23 10:11:57 +09005686 if (!d_percpu)
5687 return;
5688
Steven Rostedtdd49a382010-10-20 21:51:26 -04005689 snprintf(cpu_dir, 30, "cpu%ld", cpu);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01005690 d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
5691 if (!d_cpu) {
5692 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
5693 return;
5694 }
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005695
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01005696 /* per cpu trace_pipe */
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005697 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
Oleg Nesterov15544202013-07-23 17:25:57 +02005698 tr, cpu, &tracing_pipe_fops);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005699
5700 /* per cpu trace */
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005701 trace_create_cpu_file("trace", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02005702 tr, cpu, &tracing_fops);
Steven Rostedt7f96f932009-03-13 00:37:42 -04005703
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005704 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005705 tr, cpu, &tracing_buffers_fops);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005706
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005707 trace_create_cpu_file("stats", 0444, d_cpu,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005708 tr, cpu, &tracing_stats_fops);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005709
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005710 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005711 tr, cpu, &tracing_entries_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005712
5713#ifdef CONFIG_TRACER_SNAPSHOT
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005714 trace_create_cpu_file("snapshot", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02005715 tr, cpu, &snapshot_fops);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005716
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005717 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005718 tr, cpu, &snapshot_raw_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005719#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005720}
5721
Steven Rostedt60a11772008-05-12 21:20:44 +02005722#ifdef CONFIG_FTRACE_SELFTEST
5723/* Let selftest have access to static functions in this file */
5724#include "trace_selftest.c"
5725#endif
5726
Steven Rostedt577b7852009-02-26 23:43:05 -05005727struct trace_option_dentry {
5728 struct tracer_opt *opt;
5729 struct tracer_flags *flags;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005730 struct trace_array *tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05005731 struct dentry *entry;
5732};
5733
5734static ssize_t
5735trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
5736 loff_t *ppos)
5737{
5738 struct trace_option_dentry *topt = filp->private_data;
5739 char *buf;
5740
5741 if (topt->flags->val & topt->opt->bit)
5742 buf = "1\n";
5743 else
5744 buf = "0\n";
5745
5746 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5747}
5748
5749static ssize_t
5750trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
5751 loff_t *ppos)
5752{
5753 struct trace_option_dentry *topt = filp->private_data;
5754 unsigned long val;
Steven Rostedt577b7852009-02-26 23:43:05 -05005755 int ret;
5756
Peter Huewe22fe9b52011-06-07 21:58:27 +02005757 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5758 if (ret)
Steven Rostedt577b7852009-02-26 23:43:05 -05005759 return ret;
5760
Li Zefan8d18eaa2009-12-08 11:17:06 +08005761 if (val != 0 && val != 1)
Steven Rostedt577b7852009-02-26 23:43:05 -05005762 return -EINVAL;
Li Zefan8d18eaa2009-12-08 11:17:06 +08005763
5764 if (!!(topt->flags->val & topt->opt->bit) != val) {
5765 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05005766 ret = __set_tracer_option(topt->tr, topt->flags,
Steven Rostedtc757bea2009-12-21 22:35:16 -05005767 topt->opt, !val);
Li Zefan8d18eaa2009-12-08 11:17:06 +08005768 mutex_unlock(&trace_types_lock);
5769 if (ret)
5770 return ret;
Steven Rostedt577b7852009-02-26 23:43:05 -05005771 }
5772
5773 *ppos += cnt;
5774
5775 return cnt;
5776}
5777
5778
5779static const struct file_operations trace_options_fops = {
5780 .open = tracing_open_generic,
5781 .read = trace_options_read,
5782 .write = trace_options_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005783 .llseek = generic_file_llseek,
Steven Rostedt577b7852009-02-26 23:43:05 -05005784};
5785
Steven Rostedta8259072009-02-26 22:19:12 -05005786static ssize_t
5787trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
5788 loff_t *ppos)
5789{
5790 long index = (long)filp->private_data;
5791 char *buf;
5792
5793 if (trace_flags & (1 << index))
5794 buf = "1\n";
5795 else
5796 buf = "0\n";
5797
5798 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5799}
5800
5801static ssize_t
5802trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
5803 loff_t *ppos)
5804{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005805 struct trace_array *tr = &global_trace;
Steven Rostedta8259072009-02-26 22:19:12 -05005806 long index = (long)filp->private_data;
Steven Rostedta8259072009-02-26 22:19:12 -05005807 unsigned long val;
5808 int ret;
5809
Peter Huewe22fe9b52011-06-07 21:58:27 +02005810 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5811 if (ret)
Steven Rostedta8259072009-02-26 22:19:12 -05005812 return ret;
5813
Zhaoleif2d84b62009-08-07 18:55:48 +08005814 if (val != 0 && val != 1)
Steven Rostedta8259072009-02-26 22:19:12 -05005815 return -EINVAL;
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04005816
5817 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005818 ret = set_tracer_flag(tr, 1 << index, val);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04005819 mutex_unlock(&trace_types_lock);
Steven Rostedta8259072009-02-26 22:19:12 -05005820
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005821 if (ret < 0)
5822 return ret;
5823
Steven Rostedta8259072009-02-26 22:19:12 -05005824 *ppos += cnt;
5825
5826 return cnt;
5827}
5828
Steven Rostedta8259072009-02-26 22:19:12 -05005829static const struct file_operations trace_options_core_fops = {
5830 .open = tracing_open_generic,
5831 .read = trace_options_core_read,
5832 .write = trace_options_core_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005833 .llseek = generic_file_llseek,
Steven Rostedta8259072009-02-26 22:19:12 -05005834};
5835
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005836struct dentry *trace_create_file(const char *name,
Al Virof4ae40a2011-07-24 04:33:43 -04005837 umode_t mode,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005838 struct dentry *parent,
5839 void *data,
5840 const struct file_operations *fops)
5841{
5842 struct dentry *ret;
5843
5844 ret = debugfs_create_file(name, mode, parent, data, fops);
5845 if (!ret)
5846 pr_warning("Could not create debugfs '%s' entry\n", name);
5847
5848 return ret;
5849}
5850
5851
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005852static struct dentry *trace_options_init_dentry(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05005853{
5854 struct dentry *d_tracer;
Steven Rostedta8259072009-02-26 22:19:12 -05005855
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005856 if (tr->options)
5857 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05005858
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005859 d_tracer = tracing_init_dentry_tr(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05005860 if (!d_tracer)
5861 return NULL;
5862
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005863 tr->options = debugfs_create_dir("options", d_tracer);
5864 if (!tr->options) {
Steven Rostedta8259072009-02-26 22:19:12 -05005865 pr_warning("Could not create debugfs directory 'options'\n");
5866 return NULL;
5867 }
5868
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005869 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05005870}
5871
Steven Rostedt577b7852009-02-26 23:43:05 -05005872static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005873create_trace_option_file(struct trace_array *tr,
5874 struct trace_option_dentry *topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05005875 struct tracer_flags *flags,
5876 struct tracer_opt *opt)
5877{
5878 struct dentry *t_options;
Steven Rostedt577b7852009-02-26 23:43:05 -05005879
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005880 t_options = trace_options_init_dentry(tr);
Steven Rostedt577b7852009-02-26 23:43:05 -05005881 if (!t_options)
5882 return;
5883
5884 topt->flags = flags;
5885 topt->opt = opt;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005886 topt->tr = tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05005887
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005888 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05005889 &trace_options_fops);
5890
Steven Rostedt577b7852009-02-26 23:43:05 -05005891}
5892
5893static struct trace_option_dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005894create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
Steven Rostedt577b7852009-02-26 23:43:05 -05005895{
5896 struct trace_option_dentry *topts;
5897 struct tracer_flags *flags;
5898 struct tracer_opt *opts;
5899 int cnt;
5900
5901 if (!tracer)
5902 return NULL;
5903
5904 flags = tracer->flags;
5905
5906 if (!flags || !flags->opts)
5907 return NULL;
5908
5909 opts = flags->opts;
5910
5911 for (cnt = 0; opts[cnt].name; cnt++)
5912 ;
5913
Steven Rostedt0cfe8242009-02-27 10:51:10 -05005914 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
Steven Rostedt577b7852009-02-26 23:43:05 -05005915 if (!topts)
5916 return NULL;
5917
5918 for (cnt = 0; opts[cnt].name; cnt++)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005919 create_trace_option_file(tr, &topts[cnt], flags,
Steven Rostedt577b7852009-02-26 23:43:05 -05005920 &opts[cnt]);
5921
5922 return topts;
5923}
5924
5925static void
5926destroy_trace_option_files(struct trace_option_dentry *topts)
5927{
5928 int cnt;
5929
5930 if (!topts)
5931 return;
5932
5933 for (cnt = 0; topts[cnt].opt; cnt++) {
5934 if (topts[cnt].entry)
5935 debugfs_remove(topts[cnt].entry);
5936 }
5937
5938 kfree(topts);
5939}
5940
Steven Rostedta8259072009-02-26 22:19:12 -05005941static struct dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005942create_trace_option_core_file(struct trace_array *tr,
5943 const char *option, long index)
Steven Rostedta8259072009-02-26 22:19:12 -05005944{
5945 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05005946
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005947 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05005948 if (!t_options)
5949 return NULL;
5950
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005951 return trace_create_file(option, 0644, t_options, (void *)index,
Steven Rostedta8259072009-02-26 22:19:12 -05005952 &trace_options_core_fops);
Steven Rostedta8259072009-02-26 22:19:12 -05005953}
5954
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005955static __init void create_trace_options_dir(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05005956{
5957 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05005958 int i;
5959
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005960 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05005961 if (!t_options)
5962 return;
5963
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005964 for (i = 0; trace_options[i]; i++)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005965 create_trace_option_core_file(tr, trace_options[i], i);
Steven Rostedta8259072009-02-26 22:19:12 -05005966}
5967
Steven Rostedt499e5472012-02-22 15:50:28 -05005968static ssize_t
5969rb_simple_read(struct file *filp, char __user *ubuf,
5970 size_t cnt, loff_t *ppos)
5971{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04005972 struct trace_array *tr = filp->private_data;
Steven Rostedt499e5472012-02-22 15:50:28 -05005973 char buf[64];
5974 int r;
5975
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04005976 r = tracer_tracing_is_on(tr);
Steven Rostedt499e5472012-02-22 15:50:28 -05005977 r = sprintf(buf, "%d\n", r);
5978
5979 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5980}
5981
5982static ssize_t
5983rb_simple_write(struct file *filp, const char __user *ubuf,
5984 size_t cnt, loff_t *ppos)
5985{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04005986 struct trace_array *tr = filp->private_data;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005987 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt499e5472012-02-22 15:50:28 -05005988 unsigned long val;
5989 int ret;
5990
5991 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5992 if (ret)
5993 return ret;
5994
5995 if (buffer) {
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05005996 mutex_lock(&trace_types_lock);
5997 if (val) {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04005998 tracer_tracing_on(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005999 if (tr->current_trace->start)
6000 tr->current_trace->start(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05006001 } else {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04006002 tracer_tracing_off(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006003 if (tr->current_trace->stop)
6004 tr->current_trace->stop(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05006005 }
6006 mutex_unlock(&trace_types_lock);
Steven Rostedt499e5472012-02-22 15:50:28 -05006007 }
6008
6009 (*ppos)++;
6010
6011 return cnt;
6012}
6013
6014static const struct file_operations rb_simple_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006015 .open = tracing_open_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05006016 .read = rb_simple_read,
6017 .write = rb_simple_write,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006018 .release = tracing_release_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05006019 .llseek = default_llseek,
6020};
6021
Steven Rostedt277ba042012-08-03 16:10:49 -04006022struct dentry *trace_instance_dir;
6023
6024static void
6025init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
6026
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006027static int
6028allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
Steven Rostedt277ba042012-08-03 16:10:49 -04006029{
6030 enum ring_buffer_flags rb_flags;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006031
6032 rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
6033
Steven Rostedt (Red Hat)dced3412014-01-14 10:19:46 -05006034 buf->tr = tr;
6035
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006036 buf->buffer = ring_buffer_alloc(size, rb_flags);
6037 if (!buf->buffer)
6038 return -ENOMEM;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006039
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006040 buf->data = alloc_percpu(struct trace_array_cpu);
6041 if (!buf->data) {
6042 ring_buffer_free(buf->buffer);
6043 return -ENOMEM;
6044 }
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006045
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006046 /* Allocate the first page for all buffers */
6047 set_buffer_entries(&tr->trace_buffer,
6048 ring_buffer_size(tr->trace_buffer.buffer, 0));
6049
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006050 return 0;
6051}
6052
6053static int allocate_trace_buffers(struct trace_array *tr, int size)
6054{
6055 int ret;
6056
6057 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
6058 if (ret)
6059 return ret;
6060
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006061#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006062 ret = allocate_trace_buffer(tr, &tr->max_buffer,
6063 allocate_snapshot ? size : 1);
6064 if (WARN_ON(ret)) {
6065 ring_buffer_free(tr->trace_buffer.buffer);
6066 free_percpu(tr->trace_buffer.data);
6067 return -ENOMEM;
6068 }
6069 tr->allocated_snapshot = allocate_snapshot;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006070
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006071 /*
6072 * Only the top level trace array gets its snapshot allocated
6073 * from the kernel command line.
6074 */
6075 allocate_snapshot = false;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006076#endif
6077 return 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006078}
6079
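/*
 * Back end of "mkdir instances/<name>": allocate a new trace_array with
 * its own buffers, cpumask and debugfs directory, wire up its event
 * files, and add it to ftrace_trace_arrays.
 */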
static int new_instance_create(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = debugfs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		debugfs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	init_tracer_debugfs(tr, tr->dir);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	if (tr->trace_buffer.buffer)
		ring_buffer_free(tr->trace_buffer.buffer);
	free_percpu(tr->trace_buffer.data);	/* NULL-safe; plugs a per-cpu data leak */
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

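/*
 * Tear down a trace instance created by new_instance_create(): unlink
 * it from the list of trace arrays, remove its debugfs directory and
 * free its buffers. Fails with -EBUSY while the instance still has
 * references.
 */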
static int instance_delete(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	ret = -EBUSY;
	if (tr->ref)
		goto out_unlock;

	list_del(&tr->list);

	tracing_set_nop(tr);
	event_trace_del_tracer(tr);
	ftrace_destroy_function_files(tr);
	debugfs_remove_recursive(tr->dir);
	free_percpu(tr->trace_buffer.data);
	ring_buffer_free(tr->trace_buffer.buffer);

	free_cpumask_var(tr->tracing_cpumask);	/* allocated in new_instance_create() */
	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

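/*
 * mkdir hook for the instances directory. The actual work is done by
 * new_instance_create(); this wrapper only validates the parent and
 * juggles the inode mutex around the debugfs calls.
 */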
static int instance_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/*
	 * The inode mutex is locked, but debugfs_create_dir() will also
	 * take the mutex. As the instances directory cannot be destroyed
	 * or changed in any other way, it is safe to unlock it, and
	 * let the dentry try. If two users try to make the same dir at
	 * the same time, then new_instance_create() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = new_instance_create(dentry->d_iname);

	mutex_lock(&inode->i_mutex);

	return ret;
}

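/*
 * rmdir hook for the instances directory, the counterpart of
 * instance_mkdir(). Validates the parent and drops the inode and
 * dentry mutexes so instance_delete() can remove the debugfs tree.
 */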
static int instance_rmdir(struct inode *inode, struct dentry *dentry)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/* The caller did a dget() on dentry */
	mutex_unlock(&dentry->d_inode->i_mutex);

	/*
	 * The inode mutex is locked, but debugfs_remove_recursive() will
	 * also take the mutex. As the instances directory cannot be
	 * destroyed or changed in any other way, it is safe to unlock it,
	 * and let the dentry try. If two users try to remove the same dir
	 * at the same time, then instance_delete() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = instance_delete(dentry->d_iname);

	mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
	mutex_lock(&dentry->d_inode->i_mutex);

	return ret;
}

static const struct inode_operations instance_dir_inode_operations = {
	.lookup		= simple_lookup,
	.mkdir		= instance_mkdir,
	.rmdir		= instance_rmdir,
};

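/*
 * Create the "instances" debugfs directory and override its inode
 * operations so that mkdir/rmdir inside it create and destroy trace
 * instances.
 */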
static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = debugfs_create_dir("instances", d_tracer);
	if (WARN_ON(!trace_instance_dir))
		return;

	/* Hijack the dir inode operations, to allow mkdir */
	trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
}

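/*
 * Populate the debugfs directory of a trace array with the standard
 * control files (current_tracer, trace, trace_pipe, buffer sizes,
 * clock, tracing_on, ...) plus the per-cpu directories. Used for both
 * the top level tracing directory and each instance.
 */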
static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			&tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_debugfs_percpu(tr, cpu);
}

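/*
 * Set up the top level tracing directory in debugfs at fs_initcall
 * time: the global trace array files, the README and saved_cmdlines
 * helpers, the instances directory and the trace options.
 */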
static __init int tracer_init_debugfs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	init_tracer_debugfs(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			&tracing_thresh, &tracing_max_lat_fops);

	trace_create_file("README", 0444, d_tracer,
			NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			NULL, &tracing_saved_cmdlines_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	create_trace_options_dir(&global_trace);

	return 0;
}

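/*
 * Panic and die notifiers: if the ftrace_dump_on_oops command line
 * option is set, dump the ftrace buffers to the console when the
 * kernel panics or oopses, so the trace leading up to the crash is
 * not lost.
 */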
static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call  = trace_panic_handler,
	.next           = NULL,
	.priority       = 150   /* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};

/*
 * The printk buffer is capped at 1024 characters; we really don't
 * need it that big, as nothing should be printing 1000 characters
 * per line anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define KERN_TRACE here so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

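/*
 * Print the accumulated trace_seq buffer to the console via printk()
 * and reset the sequence for the next line. Used by ftrace_dump().
 */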
void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->len >= TRACE_MAX_PRINT)
		s->len = TRACE_MAX_PRINT;

	/* should be zero terminated, but we are paranoid. */
	s->buffer[s->len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

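/*
 * Initialize an iterator over the global trace buffer, reading all
 * CPUs. This is the iterator used when dumping from within the
 * kernel itself (ftrace_dump() and other in-kernel dumpers), where
 * no file based iterator state exists.
 */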
void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}

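/*
 * Dump the contents of the ftrace ring buffers to the console. Safe
 * to call from panic/oops context: tracing is turned off, the per-cpu
 * buffers are disabled while reading, and only one dumper may run at
 * a time.
 */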
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
	}

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all that we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);

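/*
 * Early boot initialization of the tracing core: allocate cpumasks
 * and ring buffers for the global trace array, register the nop
 * tracer and the panic/die notifiers, and apply command line boot
 * options. Runs as an early_initcall, before debugfs is populated.
 */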
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* Used for event triggers */
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_free_cpumask;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_temp_buffer;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	trace_init_cmdlines();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warning("Trace clock %s not defined, going back to default\n",
				   trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	while (trace_boot_options) {
		char *option;

		option = strsep(&trace_boot_options, ",");
		trace_set_options(&global_trace, option);
	}

	register_snapshot_cmd();

	return 0;

out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_free_cpumask:
	free_percpu(global_trace.trace_buffer.data);
#ifdef CONFIG_TRACER_MAX_TRACE
	free_percpu(global_trace.max_buffer.data);
#endif
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The default bootup tracer name is held in an init section,
	 * which is freed after boot. This function runs at
	 * late_initcall time; if the boot tracer was never found and
	 * registered, clear the pointer out, to prevent a later
	 * registration from accessing memory that is about to be
	 * freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);