/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will peek into the ring buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring buffer, such as trace_printk(), could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it by either specifying
 * "ftrace_dump_on_oops" on the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump the buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered the oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);
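/*
 * Example (editor's sketch, not part of the original source): selecting
 * a boot-up tracer from the kernel command line. Any tracer name that
 * has been registered via register_tracer() can be given, e.g.:
 *
 *	ftrace=function_graph
 *
 * Because the tracer starts before the ring buffer would normally be
 * expanded, set_cmdline_ftrace() also sets ring_buffer_expanded so the
 * buffer is allocated at full size right away.
 */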

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning=", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);


unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array	global_trace;

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}
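/*
 * Example (editor's sketch, not part of the original source): a typical
 * caller pins a trace_array across a sleepable operation and drops the
 * reference when done, e.g.:
 *
 *	if (trace_array_get(tr) < 0)
 *		return -ENODEV;
 *	...
 *	trace_array_put(tr);
 *
 * The ref count taken under trace_types_lock keeps the instance from
 * being freed while it is in use.
 */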

int filter_check_discard(struct ftrace_event_file *file, void *rec,
			 struct ring_buffer *buffer,
			 struct ring_buffer_event *event)
{
	if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(file->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(filter_check_discard);

int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(call_filter_check_discard);

static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * The ring buffer serializes readers, but that is only low-level
 * protection. The validity of the events (as returned by
 * ring_buffer_peek() etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) the page of the consumed events may become a page for
 *      splice_read, and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different CPU
 * ring buffers concurrently.
 *
 * These primitives don't distinguish read-only from read-consume
 * access. Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
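/*
 * Example (editor's sketch, not part of the original source): a consuming
 * reader for one CPU takes the per-cpu mutex (which on SMP also takes
 * all_cpu_access_lock for read), while a whole-buffer reader passes
 * RING_BUFFER_ALL_CPUS and takes the rwsem for write:
 *
 *	trace_access_lock(cpu);
 *	... consume events from that CPU's buffer ...
 *	trace_access_unlock(cpu);
 *
 * This is the rwsem-plus-per-cpu-mutex scheme described in the comment
 * block above the #ifdef.
 */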

/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;

static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:    The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, preempt_count());
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);
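/*
 * Example (editor's note, not part of the original source): callers are
 * normally expected to go through the trace_puts() macro rather than
 * call __trace_puts()/__trace_bputs() directly; the macro is expected to
 * pick the cheaper __trace_bputs() (which records only the string's
 * address) when the argument is a string literal, e.g.:
 *
 *	trace_puts("reached the slow path\n");
 */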

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:    The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, preempt_count());
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip  = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */
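/*
 * Example (editor's sketch, not part of the original source): a typical
 * in-kernel pattern is to allocate the snapshot buffer once from a
 * sleepable context and then call tracing_snapshot() from the (possibly
 * atomic) spot being debugged:
 *
 *	if (tracing_alloc_snapshot() < 0)
 *		return;
 *	...
 *	if (suspicious_condition)
 *		tracing_snapshot();
 *
 * The same swap can be triggered from user space with:
 *
 *	echo 1 > /sys/kernel/debug/tracing/snapshot
 */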

static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr: the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);
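/*
 * Example (editor's sketch, not part of the original source): memparse()
 * accepts the usual K/M/G suffixes, so the buffer size (in bytes,
 * rounded to page size per the trace_buf_size comment above) can be set
 * at boot with e.g.:
 *
 *	trace_buf_size=4M
 */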

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	"record-cmd",
	"overwrite",
	"disable_on_free",
	"irq-info",
	"markers",
	"function-trace",
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,	"local",	1 },
	{ trace_clock_global,	"global",	1 },
	{ trace_clock_counter,	"counter",	0 },
	{ trace_clock_jiffies,	"uptime",	1 },
	{ trace_clock,		"perf",		1 },
	ARCH_TRACE_CLOCKS
};

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
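/*
 * Example (editor's note, not part of the original source): given the
 * user write "foo bar", the first call fills parser->buffer with "foo"
 * and returns having consumed through the separating space; a later
 * call (with the advanced *ppos) yields "bar". A word longer than
 * parser->size - 1 fails with -EINVAL, and parser->cont marks a word
 * split across two writes.
 */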

ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
	int len;
	int ret;

	if (!cnt)
		return 0;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
	if (ret == cnt)
		return -EFAULT;

	cnt -= ret;

	s->readpos += cnt;
	return cnt;
}

static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->readpos, cnt);

	s->readpos += cnt;
	return cnt;
}

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static void wait_on_pipe(struct trace_iterator *iter)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return;

	ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}
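/*
 * Example (editor's sketch, not part of the original source): a minimal
 * tracer only needs a name plus init/reset callbacks before it can be
 * handed to register_tracer(), typically from an __init function:
 *
 *	static struct tracer example_trace __read_mostly = {
 *		.name	= "example",
 *		.init	= example_trace_init,
 *		.reset	= example_trace_reset,
 *	};
 *
 *	static __init int init_example_trace(void)
 *	{
 *		return register_tracer(&example_trace);
 *	}
 *
 * The names and callbacks here are hypothetical; register_tracer() fills
 * in dummy flag handling for any callbacks that are left NULL.
 */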
1237
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001238void tracing_reset(struct trace_buffer *buf, int cpu)
Steven Rostedtf6339032009-09-04 12:35:16 -04001239{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001240 struct ring_buffer *buffer = buf->buffer;
Steven Rostedtf6339032009-09-04 12:35:16 -04001241
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001242 if (!buffer)
1243 return;
1244
Steven Rostedtf6339032009-09-04 12:35:16 -04001245 ring_buffer_record_disable(buffer);
1246
1247 /* Make sure all commits have finished */
1248 synchronize_sched();
Steven Rostedt68179682012-05-08 20:57:53 -04001249 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedtf6339032009-09-04 12:35:16 -04001250
1251 ring_buffer_record_enable(buffer);
1252}
1253
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001254void tracing_reset_online_cpus(struct trace_buffer *buf)
Pekka J Enberg213cc062008-12-19 12:08:39 +02001255{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001256 struct ring_buffer *buffer = buf->buffer;
Pekka J Enberg213cc062008-12-19 12:08:39 +02001257 int cpu;
1258
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001259 if (!buffer)
1260 return;
1261
Steven Rostedt621968c2009-09-04 12:02:35 -04001262 ring_buffer_record_disable(buffer);
1263
1264 /* Make sure all commits have finished */
1265 synchronize_sched();
1266
Alexander Z Lam94571582013-08-02 18:36:16 -07001267 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001268
1269 for_each_online_cpu(cpu)
Steven Rostedt68179682012-05-08 20:57:53 -04001270 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedt621968c2009-09-04 12:02:35 -04001271
1272 ring_buffer_record_enable(buffer);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001273}
1274
Steven Rostedt (Red Hat)09d80912013-07-23 22:21:59 -04001275/* Must have trace_types_lock held */
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001276void tracing_reset_all_online_cpus(void)
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001277{
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001278 struct trace_array *tr;
1279
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001280 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001281 tracing_reset_online_cpus(&tr->trace_buffer);
1282#ifdef CONFIG_TRACER_MAX_TRACE
1283 tracing_reset_online_cpus(&tr->max_buffer);
1284#endif
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001285 }
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001286}
1287
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001288#define SAVED_CMDLINES_DEFAULT 128
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001289#define NO_CMDLINE_MAP UINT_MAX
Thomas Gleixneredc35bd2009-12-03 12:38:57 +01001290static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001291struct saved_cmdlines_buffer {
1292 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1293 unsigned *map_cmdline_to_pid;
1294 unsigned cmdline_num;
1295 int cmdline_idx;
1296 char *saved_cmdlines;
1297};
1298static struct saved_cmdlines_buffer *savedcmd;
Steven Rostedt25b0b442008-05-12 21:21:00 +02001299
Steven Rostedt25b0b442008-05-12 21:21:00 +02001300/* temporary disable recording */
Hannes Eder4fd27352009-02-10 19:44:12 +01001301static atomic_t trace_record_cmdline_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001302
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001303static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001304{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001305 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1306}
1307
1308static inline void set_cmdline(int idx, const char *cmdline)
1309{
1310 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1311}
1312
1313static int allocate_cmdlines_buffer(unsigned int val,
1314 struct saved_cmdlines_buffer *s)
1315{
1316 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1317 GFP_KERNEL);
1318 if (!s->map_cmdline_to_pid)
1319 return -ENOMEM;
1320
1321 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1322 if (!s->saved_cmdlines) {
1323 kfree(s->map_cmdline_to_pid);
1324 return -ENOMEM;
1325 }
1326
1327 s->cmdline_idx = 0;
1328 s->cmdline_num = val;
1329 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1330 sizeof(s->map_pid_to_cmdline));
1331 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1332 val * sizeof(*s->map_cmdline_to_pid));
1333
1334 return 0;
1335}
1336
1337static int trace_create_savedcmd(void)
1338{
1339 int ret;
1340
1341 savedcmd = kmalloc(sizeof(struct saved_cmdlines_buffer), GFP_KERNEL);
1342 if (!savedcmd)
1343 return -ENOMEM;
1344
1345 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1346 if (ret < 0) {
1347 kfree(savedcmd);
1348 savedcmd = NULL;
1349 return -ENOMEM;
1350 }
1351
1352 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001353}
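
/*
 * Sizing sketch (illustrative arithmetic, assuming a 4-byte unsigned):
 * with the default SAVED_CMDLINES_DEFAULT of 128,
 * allocate_cmdlines_buffer() above adds roughly
 *
 *	128 * sizeof(unsigned)   =  512 bytes	(map_cmdline_to_pid)
 *	128 * TASK_COMM_LEN (16) = 2048 bytes	(saved_cmdlines)
 *
 * on top of the map_pid_to_cmdline[PID_MAX_DEFAULT+1] array embedded
 * in struct saved_cmdlines_buffer itself (about 128 KB of unsigneds).
 */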
1354
Carsten Emdeb5130b12009-09-13 01:43:07 +02001355int is_tracing_stopped(void)
1356{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001357 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02001358}
1359
Steven Rostedt0f048702008-11-05 16:05:44 -05001360/**
1361 * tracing_start - quick start of the tracer
1362 *
1363 * If tracing is enabled but was stopped by tracing_stop,
1364 * this will start the tracer back up.
1365 */
1366void tracing_start(void)
1367{
1368 struct ring_buffer *buffer;
1369 unsigned long flags;
1370
1371 if (tracing_disabled)
1372 return;
1373
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001374 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1375 if (--global_trace.stop_count) {
1376 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05001377 /* Someone screwed up their debugging */
1378 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001379 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05001380 }
Steven Rostedt0f048702008-11-05 16:05:44 -05001381 goto out;
1382 }
1383
Steven Rostedta2f80712010-03-12 19:56:00 -05001384 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001385 arch_spin_lock(&global_trace.max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05001386
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001387 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001388 if (buffer)
1389 ring_buffer_record_enable(buffer);
1390
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001391#ifdef CONFIG_TRACER_MAX_TRACE
1392 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001393 if (buffer)
1394 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001395#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001396
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001397 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001398
Steven Rostedt0f048702008-11-05 16:05:44 -05001399 ftrace_start();
1400 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001401 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1402}
1403
1404static void tracing_start_tr(struct trace_array *tr)
1405{
1406 struct ring_buffer *buffer;
1407 unsigned long flags;
1408
1409 if (tracing_disabled)
1410 return;
1411
1412 /* If global, we need to also start the max tracer */
1413 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1414 return tracing_start();
1415
1416 raw_spin_lock_irqsave(&tr->start_lock, flags);
1417
1418 if (--tr->stop_count) {
1419 if (tr->stop_count < 0) {
1420 /* Someone screwed up their debugging */
1421 WARN_ON_ONCE(1);
1422 tr->stop_count = 0;
1423 }
1424 goto out;
1425 }
1426
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001427 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001428 if (buffer)
1429 ring_buffer_record_enable(buffer);
1430
1431 out:
1432 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001433}
1434
1435/**
1436 * tracing_stop - quick stop of the tracer
1437 *
1438 * Light weight way to stop tracing. Use in conjunction with
1439 * tracing_start.
1440 */
1441void tracing_stop(void)
1442{
1443 struct ring_buffer *buffer;
1444 unsigned long flags;
1445
1446 ftrace_stop();
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001447 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1448 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05001449 goto out;
1450
Steven Rostedta2f80712010-03-12 19:56:00 -05001451 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001452 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001453
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001454 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001455 if (buffer)
1456 ring_buffer_record_disable(buffer);
1457
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001458#ifdef CONFIG_TRACER_MAX_TRACE
1459 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001460 if (buffer)
1461 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001462#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001463
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001464 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001465
Steven Rostedt0f048702008-11-05 16:05:44 -05001466 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001467 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1468}
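
/*
 * Usage sketch, assuming a hypothetical dump_suspect_state() helper:
 * tracing_stop()/tracing_start() nest through stop_count, so a debug
 * path can freeze the ring buffers around a suspect window:
 *
 *	tracing_stop();
 *	dump_suspect_state();
 *	tracing_start();
 *
 * Recording resumes only when the final tracing_start() brings
 * stop_count back down to zero.
 */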
1469
1470static void tracing_stop_tr(struct trace_array *tr)
1471{
1472 struct ring_buffer *buffer;
1473 unsigned long flags;
1474
1475 /* If global, we need to also stop the max tracer */
1476 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1477 return tracing_stop();
1478
1479 raw_spin_lock_irqsave(&tr->start_lock, flags);
1480 if (tr->stop_count++)
1481 goto out;
1482
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001483 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001484 if (buffer)
1485 ring_buffer_record_disable(buffer);
1486
1487 out:
1488 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001489}
1490
Ingo Molnare309b412008-05-12 21:20:51 +02001491void trace_stop_cmdline_recording(void);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001492
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001493static int trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001494{
Carsten Emdea635cf02009-03-18 09:00:41 +01001495 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001496
1497 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001498 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001499
1500 /*
1501 * It's not the end of the world if we don't get
1502 * the lock, but we also don't want to spin
1503 * nor do we want to disable interrupts,
1504 * so if we miss here, then better luck next time.
1505 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001506 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001507 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001508
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001509 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001510 if (idx == NO_CMDLINE_MAP) {
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001511 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001512
Carsten Emdea635cf02009-03-18 09:00:41 +01001513 /*
1514 * Check whether the cmdline buffer at idx has a pid
1515 * mapped. We are going to overwrite that entry so we
1516 * need to clear the map_pid_to_cmdline. Otherwise we
1517 * would read the new comm for the old pid.
1518 */
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001519 pid = savedcmd->map_cmdline_to_pid[idx];
Carsten Emdea635cf02009-03-18 09:00:41 +01001520 if (pid != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001521 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001522
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001523 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1524 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001525
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001526 savedcmd->cmdline_idx = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001527 }
1528
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001529 set_cmdline(idx, tsk->comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001530
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001531 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001532
1533 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001534}
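
/*
 * Worked example with hypothetical pids: suppose "bash" (pid 1234) was
 * saved at idx 5 and cmdline_idx later wraps back around to 5 for
 * "sshd" (pid 999). The eviction above then leaves:
 *
 *	savedcmd->map_pid_to_cmdline[1234] = NO_CMDLINE_MAP;
 *	savedcmd->map_cmdline_to_pid[5]    = 999;
 *	savedcmd->map_pid_to_cmdline[999]  = 5;
 *
 * so a later lookup of pid 1234 falls back to "<...>" rather than
 * wrongly reporting "sshd".
 */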
1535
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001536static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001537{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001538 unsigned map;
1539
Steven Rostedt4ca53082009-03-16 19:20:15 -04001540 if (!pid) {
1541 strcpy(comm, "<idle>");
1542 return;
1543 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001544
Steven Rostedt74bf4072010-01-25 15:11:53 -05001545 if (WARN_ON_ONCE(pid < 0)) {
1546 strcpy(comm, "<XXX>");
1547 return;
1548 }
1549
Steven Rostedt4ca53082009-03-16 19:20:15 -04001550 if (pid > PID_MAX_DEFAULT) {
1551 strcpy(comm, "<...>");
1552 return;
1553 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001554
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001555 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001556 if (map != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001557 strcpy(comm, get_saved_cmdlines(map));
Thomas Gleixner50d88752009-03-18 08:58:44 +01001558 else
1559 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001560}
1561
1562void trace_find_cmdline(int pid, char comm[])
1563{
1564 preempt_disable();
1565 arch_spin_lock(&trace_cmdline_lock);
1566
1567 __trace_find_cmdline(pid, comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001568
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001569 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001570 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001571}
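
/*
 * Caller-side sketch (a simplified version of what the trace output
 * code does): the comm buffer handed in must hold TASK_COMM_LEN bytes:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	trace_find_cmdline(entry->pid, comm);
 *	trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
 */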
1572
Ingo Molnare309b412008-05-12 21:20:51 +02001573void tracing_record_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001574{
Steven Rostedt0fb96562012-05-11 14:25:30 -04001575 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001576 return;
1577
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001578 if (!__this_cpu_read(trace_cmdline_save))
1579 return;
1580
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001581 if (trace_save_cmdline(tsk))
1582 __this_cpu_write(trace_cmdline_save, false);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001583}
1584
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03001585void
Steven Rostedt38697052008-10-01 13:14:09 -04001586tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1587 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001588{
1589 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001590
Steven Rostedt777e2082008-09-29 23:02:42 -04001591 entry->preempt_count = pc & 0xff;
1592 entry->pid = (tsk) ? tsk->pid : 0;
1593 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04001594#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04001595 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04001596#else
1597 TRACE_FLAG_IRQS_NOSUPPORT |
1598#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001599 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1600 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02001601 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1602 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001603}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02001604EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
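
/*
 * Worked example (sketch): called with interrupts disabled and a
 * preempt depth of 1 from hard-irq context, the helper above records
 *
 *	entry->preempt_count = 1;
 *	entry->flags = TRACE_FLAG_IRQS_OFF | TRACE_FLAG_HARDIRQ;
 *
 * which the latency output later renders as a "d.h1"-style column.
 */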
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001605
Steven Rostedte77405a2009-09-02 14:17:06 -04001606struct ring_buffer_event *
1607trace_buffer_lock_reserve(struct ring_buffer *buffer,
1608 int type,
1609 unsigned long len,
1610 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001611{
1612 struct ring_buffer_event *event;
1613
Steven Rostedte77405a2009-09-02 14:17:06 -04001614 event = ring_buffer_lock_reserve(buffer, len);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001615 if (event != NULL) {
1616 struct trace_entry *ent = ring_buffer_event_data(event);
1617
1618 tracing_generic_entry_update(ent, flags, pc);
1619 ent->type = type;
1620 }
1621
1622 return event;
1623}
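
/*
 * Typical reserve/commit round trip (sketch; trace_function() below is
 * the canonical in-tree user). A NULL event means the ring buffer is
 * full or disabled:
 *
 *	event = trace_buffer_lock_reserve(buffer, TRACE_FN,
 *					  sizeof(*entry), flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->ip = ip;
 *	__buffer_unlock_commit(buffer, event);
 */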
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001624
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001625void
1626__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1627{
1628 __this_cpu_write(trace_cmdline_save, true);
1629 ring_buffer_unlock_commit(buffer, event);
1630}
1631
Steven Rostedte77405a2009-09-02 14:17:06 -04001632static inline void
1633__trace_buffer_unlock_commit(struct ring_buffer *buffer,
1634 struct ring_buffer_event *event,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001635 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001636{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001637 __buffer_unlock_commit(buffer, event);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001638
Steven Rostedte77405a2009-09-02 14:17:06 -04001639 ftrace_trace_stack(buffer, flags, 6, pc);
1640 ftrace_trace_userstack(buffer, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001641}
1642
Steven Rostedte77405a2009-09-02 14:17:06 -04001643void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1644 struct ring_buffer_event *event,
1645 unsigned long flags, int pc)
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001646{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001647 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001648}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001649EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001650
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001651static struct ring_buffer *temp_buffer;
1652
Steven Rostedtef5580d2009-02-27 19:38:04 -05001653struct ring_buffer_event *
Steven Rostedtccb469a2012-08-02 10:32:10 -04001654trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1655 struct ftrace_event_file *ftrace_file,
1656 int type, unsigned long len,
1657 unsigned long flags, int pc)
1658{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001659 struct ring_buffer_event *entry;
1660
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001661 *current_rb = ftrace_file->tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001662 entry = trace_buffer_lock_reserve(*current_rb,
Steven Rostedtccb469a2012-08-02 10:32:10 -04001663 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001664 /*
1665 * If tracing is off, but we have triggers enabled
1666 * we still need to look at the event data. Use the temp_buffer
1667	 * to store the trace event for the trigger to use. It's recursion
1668 * safe and will not be recorded anywhere.
1669 */
1670 if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
1671 *current_rb = temp_buffer;
1672 entry = trace_buffer_lock_reserve(*current_rb,
1673 type, len, flags, pc);
1674 }
1675 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04001676}
1677EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1678
1679struct ring_buffer_event *
Steven Rostedte77405a2009-09-02 14:17:06 -04001680trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1681 int type, unsigned long len,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001682 unsigned long flags, int pc)
1683{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001684 *current_rb = global_trace.trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04001685 return trace_buffer_lock_reserve(*current_rb,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001686 type, len, flags, pc);
1687}
Steven Rostedt94487d62009-05-05 19:22:53 -04001688EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001689
Steven Rostedte77405a2009-09-02 14:17:06 -04001690void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1691 struct ring_buffer_event *event,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001692 unsigned long flags, int pc)
1693{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001694 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001695}
Steven Rostedt94487d62009-05-05 19:22:53 -04001696EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001697
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001698void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1699 struct ring_buffer_event *event,
1700 unsigned long flags, int pc,
1701 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001702{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001703 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001704
1705 ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1706 ftrace_trace_userstack(buffer, flags, pc);
1707}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001708EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001709
Steven Rostedte77405a2009-09-02 14:17:06 -04001710void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1711 struct ring_buffer_event *event)
Steven Rostedt77d9f462009-04-02 01:16:59 -04001712{
Steven Rostedte77405a2009-09-02 14:17:06 -04001713 ring_buffer_discard_commit(buffer, event);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001714}
Steven Rostedt12acd472009-04-17 16:01:56 -04001715EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001716
Ingo Molnare309b412008-05-12 21:20:51 +02001717void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001718trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04001719 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1720 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001721{
Tom Zanussie1112b42009-03-31 00:48:49 -05001722 struct ftrace_event_call *call = &event_function;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001723 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001724 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001725 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001726
Steven Rostedtd7690412008-10-01 00:29:53 -04001727 /* If we are reading the ring buffer, don't trace */
Rusty Russelldd17c8f2009-10-29 22:34:15 +09001728 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
Steven Rostedtd7690412008-10-01 00:29:53 -04001729 return;
1730
Steven Rostedte77405a2009-09-02 14:17:06 -04001731 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001732 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001733 if (!event)
1734 return;
1735 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04001736 entry->ip = ip;
1737 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05001738
Tom Zanussif306cc82013-10-24 08:34:17 -05001739 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001740 __buffer_unlock_commit(buffer, event);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001741}
1742
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001743#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001744
1745#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1746struct ftrace_stack {
1747 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1748};
1749
1750static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1751static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1752
Steven Rostedte77405a2009-09-02 14:17:06 -04001753static void __ftrace_trace_stack(struct ring_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05001754 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001755 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02001756{
Tom Zanussie1112b42009-03-31 00:48:49 -05001757 struct ftrace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001758 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001759 struct stack_entry *entry;
Ingo Molnar86387f72008-05-12 21:20:51 +02001760 struct stack_trace trace;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001761 int use_stack;
1762 int size = FTRACE_STACK_ENTRIES;
Ingo Molnar86387f72008-05-12 21:20:51 +02001763
1764 trace.nr_entries = 0;
Ingo Molnar86387f72008-05-12 21:20:51 +02001765 trace.skip = skip;
Ingo Molnar86387f72008-05-12 21:20:51 +02001766
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001767 /*
1768	 * Since events can happen in NMIs, there's no safe way to
1769	 * use the per cpu ftrace_stacks. We reserve it, and if an interrupt
1770	 * or NMI comes in, it will just have to use the default
1771	 * FTRACE_STACK_ENTRIES-sized stack.
1772 */
1773 preempt_disable_notrace();
1774
Shan Wei82146522012-11-19 13:21:01 +08001775 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001776 /*
1777 * We don't need any atomic variables, just a barrier.
1778 * If an interrupt comes in, we don't care, because it would
1779 * have exited and put the counter back to what we want.
1780 * We just need a barrier to keep gcc from moving things
1781 * around.
1782 */
1783 barrier();
1784 if (use_stack == 1) {
Christoph Lameterbdffd892014-04-29 14:17:40 -05001785 trace.entries = this_cpu_ptr(ftrace_stack.calls);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001786 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1787
1788 if (regs)
1789 save_stack_trace_regs(regs, &trace);
1790 else
1791 save_stack_trace(&trace);
1792
1793 if (trace.nr_entries > size)
1794 size = trace.nr_entries;
1795 } else
1796 /* From now on, use_stack is a boolean */
1797 use_stack = 0;
1798
1799 size *= sizeof(unsigned long);
1800
1801 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1802 sizeof(*entry) + size, flags, pc);
1803 if (!event)
1804 goto out;
1805 entry = ring_buffer_event_data(event);
1806
1807 memset(&entry->caller, 0, size);
1808
1809 if (use_stack)
1810 memcpy(&entry->caller, trace.entries,
1811 trace.nr_entries * sizeof(unsigned long));
1812 else {
1813 trace.max_entries = FTRACE_STACK_ENTRIES;
1814 trace.entries = entry->caller;
1815 if (regs)
1816 save_stack_trace_regs(regs, &trace);
1817 else
1818 save_stack_trace(&trace);
1819 }
1820
1821 entry->size = trace.nr_entries;
1822
Tom Zanussif306cc82013-10-24 08:34:17 -05001823 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001824 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001825
1826 out:
1827 /* Again, don't let gcc optimize things here */
1828 barrier();
Shan Wei82146522012-11-19 13:21:01 +08001829 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001830 preempt_enable_notrace();
1831
Ingo Molnarf0a920d2008-05-12 21:20:47 +02001832}
1833
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001834void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1835 int skip, int pc, struct pt_regs *regs)
1836{
1837 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1838 return;
1839
1840 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1841}
1842
Steven Rostedte77405a2009-09-02 14:17:06 -04001843void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1844 int skip, int pc)
Steven Rostedt53614992009-01-15 19:12:40 -05001845{
1846 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1847 return;
1848
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001849 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
Steven Rostedt53614992009-01-15 19:12:40 -05001850}
1851
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001852void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1853 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04001854{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001855 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
Steven Rostedt38697052008-10-01 13:14:09 -04001856}
1857
Steven Rostedt03889382009-12-11 09:48:22 -05001858/**
1859 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001860 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05001861 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001862void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05001863{
1864 unsigned long flags;
1865
1866 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05001867 return;
Steven Rostedt03889382009-12-11 09:48:22 -05001868
1869 local_save_flags(flags);
1870
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001871 /*
1872 * Skip 3 more, seems to get us at the caller of
1873 * this function.
1874 */
1875 skip += 3;
1876 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1877 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05001878}
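
/*
 * Usage sketch (bad_condition is a stand-in): drop this into a code
 * path under investigation to record how it was reached; skip == 0
 * starts the reported stack at the immediate caller:
 *
 *	if (unlikely(bad_condition))
 *		trace_dump_stack(0);
 */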
1879
Steven Rostedt91e86e52010-11-10 12:56:12 +01001880static DEFINE_PER_CPU(int, user_stack_count);
1881
Steven Rostedte77405a2009-09-02 14:17:06 -04001882void
1883ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02001884{
Tom Zanussie1112b42009-03-31 00:48:49 -05001885 struct ftrace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02001886 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02001887 struct userstack_entry *entry;
1888 struct stack_trace trace;
Török Edwin02b67512008-11-22 13:28:47 +02001889
1890 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1891 return;
1892
Steven Rostedtb6345872010-03-12 20:03:30 -05001893 /*
1894	 * NMIs cannot handle page faults, even with fixups.
1895	 * Saving the user stack can (and often does) fault.
1896 */
1897 if (unlikely(in_nmi()))
1898 return;
1899
Steven Rostedt91e86e52010-11-10 12:56:12 +01001900 /*
1901 * prevent recursion, since the user stack tracing may
1902 * trigger other kernel events.
1903 */
1904 preempt_disable();
1905 if (__this_cpu_read(user_stack_count))
1906 goto out;
1907
1908 __this_cpu_inc(user_stack_count);
1909
Steven Rostedte77405a2009-09-02 14:17:06 -04001910 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001911 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02001912 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08001913 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02001914 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02001915
Steven Rostedt48659d32009-09-11 11:36:23 -04001916 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02001917 memset(&entry->caller, 0, sizeof(entry->caller));
1918
1919 trace.nr_entries = 0;
1920 trace.max_entries = FTRACE_STACK_ENTRIES;
1921 trace.skip = 0;
1922 trace.entries = entry->caller;
1923
1924 save_stack_trace_user(&trace);
Tom Zanussif306cc82013-10-24 08:34:17 -05001925 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001926 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001927
Li Zefan1dbd1952010-12-09 15:47:56 +08001928 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01001929 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001930 out:
1931 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02001932}
1933
Hannes Eder4fd27352009-02-10 19:44:12 +01001934#ifdef UNUSED
1935static void __trace_userstack(struct trace_array *tr, unsigned long flags)
Török Edwin02b67512008-11-22 13:28:47 +02001936{
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001937 ftrace_trace_userstack(tr, flags, preempt_count());
Török Edwin02b67512008-11-22 13:28:47 +02001938}
Hannes Eder4fd27352009-02-10 19:44:12 +01001939#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02001940
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001941#endif /* CONFIG_STACKTRACE */
1942
Steven Rostedt07d777f2011-09-22 14:01:55 -04001943/* created for use with alloc_percpu */
1944struct trace_buffer_struct {
1945 char buffer[TRACE_BUF_SIZE];
1946};
1947
1948static struct trace_buffer_struct *trace_percpu_buffer;
1949static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1950static struct trace_buffer_struct *trace_percpu_irq_buffer;
1951static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1952
1953/*
1954 * The buffer used is dependent on the context. There is a per cpu
1955 * buffer for normal context, softirq context, hard irq context and
1956 * for NMI context. This allows for lockless recording.
1957 *
1958 * Note, if the buffers failed to be allocated, then this returns NULL
1959 */
1960static char *get_trace_buf(void)
1961{
1962 struct trace_buffer_struct *percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04001963
1964 /*
1965 * If we have allocated per cpu buffers, then we do not
1966 * need to do any locking.
1967 */
1968 if (in_nmi())
1969 percpu_buffer = trace_percpu_nmi_buffer;
1970 else if (in_irq())
1971 percpu_buffer = trace_percpu_irq_buffer;
1972 else if (in_softirq())
1973 percpu_buffer = trace_percpu_sirq_buffer;
1974 else
1975 percpu_buffer = trace_percpu_buffer;
1976
1977 if (!percpu_buffer)
1978 return NULL;
1979
Shan Weid8a03492012-11-13 09:53:04 +08001980 return this_cpu_ptr(&percpu_buffer->buffer[0]);
Steven Rostedt07d777f2011-09-22 14:01:55 -04001981}
1982
1983static int alloc_percpu_trace_buffer(void)
1984{
1985 struct trace_buffer_struct *buffers;
1986 struct trace_buffer_struct *sirq_buffers;
1987 struct trace_buffer_struct *irq_buffers;
1988 struct trace_buffer_struct *nmi_buffers;
1989
1990 buffers = alloc_percpu(struct trace_buffer_struct);
1991 if (!buffers)
1992 goto err_warn;
1993
1994 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1995 if (!sirq_buffers)
1996 goto err_sirq;
1997
1998 irq_buffers = alloc_percpu(struct trace_buffer_struct);
1999 if (!irq_buffers)
2000 goto err_irq;
2001
2002 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
2003 if (!nmi_buffers)
2004 goto err_nmi;
2005
2006 trace_percpu_buffer = buffers;
2007 trace_percpu_sirq_buffer = sirq_buffers;
2008 trace_percpu_irq_buffer = irq_buffers;
2009 trace_percpu_nmi_buffer = nmi_buffers;
2010
2011 return 0;
2012
2013 err_nmi:
2014 free_percpu(irq_buffers);
2015 err_irq:
2016 free_percpu(sirq_buffers);
2017 err_sirq:
2018 free_percpu(buffers);
2019 err_warn:
2020 WARN(1, "Could not allocate percpu trace_printk buffer");
2021 return -ENOMEM;
2022}
2023
Steven Rostedt81698832012-10-11 10:15:05 -04002024static int buffers_allocated;
2025
Steven Rostedt07d777f2011-09-22 14:01:55 -04002026void trace_printk_init_buffers(void)
2027{
Steven Rostedt07d777f2011-09-22 14:01:55 -04002028 if (buffers_allocated)
2029 return;
2030
2031 if (alloc_percpu_trace_buffer())
2032 return;
2033
Steven Rostedt2184db42014-05-28 13:14:40 -04002034 /* trace_printk() is for debug use only. Don't use it in production. */
2035
2036 pr_warning("\n**********************************************************\n");
2037 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2038 pr_warning("** **\n");
2039 pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
2040 pr_warning("** **\n");
2041 pr_warning("** This means that this is a DEBUG kernel and it is **\n");
2042	pr_warning("** unsafe for production use.                          **\n");
2043 pr_warning("** **\n");
2044 pr_warning("** If you see this message and you are not debugging **\n");
2045 pr_warning("** the kernel, report this immediately to your vendor! **\n");
2046 pr_warning("** **\n");
2047 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2048 pr_warning("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04002049
Steven Rostedtb382ede62012-10-10 21:44:34 -04002050 /* Expand the buffers to set size */
2051 tracing_update_buffers();
2052
Steven Rostedt07d777f2011-09-22 14:01:55 -04002053 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04002054
2055 /*
2056 * trace_printk_init_buffers() can be called by modules.
2057 * If that happens, then we need to start cmdline recording
2058 * directly here. If the global_trace.buffer is already
2059 * allocated here, then this was called by module code.
2060 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002061 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04002062 tracing_start_cmdline_record();
2063}
2064
2065void trace_printk_start_comm(void)
2066{
2067 /* Start tracing comms if trace printk is set */
2068 if (!buffers_allocated)
2069 return;
2070 tracing_start_cmdline_record();
2071}
2072
2073static void trace_printk_start_stop_comm(int enabled)
2074{
2075 if (!buffers_allocated)
2076 return;
2077
2078 if (enabled)
2079 tracing_start_cmdline_record();
2080 else
2081 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002082}
2083
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002084/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002085 * trace_vbprintk - write binary msg to tracing buffer
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002086 * @ip: caller address to record; @fmt, @args: binary format and va_list
2087 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04002088int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002089{
Tom Zanussie1112b42009-03-31 00:48:49 -05002090 struct ftrace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002091 struct ring_buffer_event *event;
Steven Rostedte77405a2009-09-02 14:17:06 -04002092 struct ring_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002093 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002094 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002095 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002096 char *tbuffer;
2097 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002098
2099 if (unlikely(tracing_selftest_running || tracing_disabled))
2100 return 0;
2101
2102 /* Don't pollute graph traces with trace_vprintk internals */
2103 pause_graph_tracing();
2104
2105 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04002106 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002107
Steven Rostedt07d777f2011-09-22 14:01:55 -04002108 tbuffer = get_trace_buf();
2109 if (!tbuffer) {
2110 len = 0;
2111 goto out;
2112 }
2113
2114 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2115
2116 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002117 goto out;
2118
Steven Rostedt07d777f2011-09-22 14:01:55 -04002119 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002120 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002121 buffer = tr->trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04002122 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2123 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002124 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002125 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002126 entry = ring_buffer_event_data(event);
2127 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002128 entry->fmt = fmt;
2129
Steven Rostedt07d777f2011-09-22 14:01:55 -04002130 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05002131 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002132 __buffer_unlock_commit(buffer, event);
Steven Rostedtd9313692010-01-06 17:27:11 -05002133 ftrace_trace_stack(buffer, flags, 6, pc);
2134 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002135
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002136out:
Steven Rostedt5168ae52010-06-03 09:36:50 -04002137 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002138 unpause_graph_tracing();
2139
2140 return len;
2141}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002142EXPORT_SYMBOL_GPL(trace_vbprintk);
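
/*
 * Caller-side sketch (ret and name are hypothetical locals): with a
 * compile-time constant format, trace_printk() lands here via
 * __trace_bprintk(), recording _THIS_IP_ plus the binary arguments:
 *
 *	trace_printk("read %d bytes from %s\n", ret, name);
 *
 * Formatting is deferred until the buffer is read, which is what keeps
 * the tracing fast path cheap.
 */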
2143
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002144static int
2145__trace_array_vprintk(struct ring_buffer *buffer,
2146 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002147{
Tom Zanussie1112b42009-03-31 00:48:49 -05002148 struct ftrace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002149 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002150 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002151 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002152 unsigned long flags;
2153 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002154
2155 if (tracing_disabled || tracing_selftest_running)
2156 return 0;
2157
Steven Rostedt07d777f2011-09-22 14:01:55 -04002158 /* Don't pollute graph traces with trace_vprintk internals */
2159 pause_graph_tracing();
2160
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002161 pc = preempt_count();
2162 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002163
Steven Rostedt07d777f2011-09-22 14:01:55 -04002164
2165 tbuffer = get_trace_buf();
2166 if (!tbuffer) {
2167 len = 0;
2168 goto out;
2169 }
2170
2171 len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2172 if (len > TRACE_BUF_SIZE)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002173 goto out;
2174
Steven Rostedt07d777f2011-09-22 14:01:55 -04002175 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002176 size = sizeof(*entry) + len + 1;
Steven Rostedte77405a2009-09-02 14:17:06 -04002177 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
Steven Rostedt07d777f2011-09-22 14:01:55 -04002178 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002179 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002180 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002181 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002182 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002183
Steven Rostedt07d777f2011-09-22 14:01:55 -04002184 memcpy(&entry->buf, tbuffer, len);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002185 entry->buf[len] = '\0';
Tom Zanussif306cc82013-10-24 08:34:17 -05002186 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002187 __buffer_unlock_commit(buffer, event);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002188 ftrace_trace_stack(buffer, flags, 6, pc);
Steven Rostedtd9313692010-01-06 17:27:11 -05002189 }
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002190 out:
2191 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002192 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002193
2194 return len;
2195}
Steven Rostedt659372d2009-09-03 19:11:07 -04002196
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002197int trace_array_vprintk(struct trace_array *tr,
2198 unsigned long ip, const char *fmt, va_list args)
2199{
2200 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2201}
2202
2203int trace_array_printk(struct trace_array *tr,
2204 unsigned long ip, const char *fmt, ...)
2205{
2206 int ret;
2207 va_list ap;
2208
2209 if (!(trace_flags & TRACE_ITER_PRINTK))
2210 return 0;
2211
2212 va_start(ap, fmt);
2213 ret = trace_array_vprintk(tr, ip, fmt, ap);
2214 va_end(ap);
2215 return ret;
2216}
2217
2218int trace_array_printk_buf(struct ring_buffer *buffer,
2219 unsigned long ip, const char *fmt, ...)
2220{
2221 int ret;
2222 va_list ap;
2223
2224 if (!(trace_flags & TRACE_ITER_PRINTK))
2225 return 0;
2226
2227 va_start(ap, fmt);
2228 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2229 va_end(ap);
2230 return ret;
2231}
2232
Steven Rostedt659372d2009-09-03 19:11:07 -04002233int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2234{
Steven Rostedta813a152009-10-09 01:41:35 -04002235 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04002236}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002237EXPORT_SYMBOL_GPL(trace_vprintk);
2238
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002239static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04002240{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002241 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2242
Steven Rostedt5a90f572008-09-03 17:42:51 -04002243 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002244 if (buf_iter)
2245 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04002246}
2247
Ingo Molnare309b412008-05-12 21:20:51 +02002248static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002249peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2250 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002251{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002252 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002253 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002254
Steven Rostedtd7690412008-10-01 00:29:53 -04002255 if (buf_iter)
2256 event = ring_buffer_iter_peek(buf_iter, ts);
2257 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002258 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002259 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04002260
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002261 if (event) {
2262 iter->ent_size = ring_buffer_event_length(event);
2263 return ring_buffer_event_data(event);
2264 }
2265 iter->ent_size = 0;
2266 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002267}
Steven Rostedtd7690412008-10-01 00:29:53 -04002268
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002269static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002270__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2271 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002272{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002273 struct ring_buffer *buffer = iter->trace_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002274 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08002275 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002276 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002277 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002278 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002279 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002280 int cpu;
2281
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002282 /*
2283	 * If we are in a per_cpu trace file, don't bother iterating over
2284	 * all cpus; just peek at the requested cpu directly.
2285 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002286 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002287 if (ring_buffer_empty_cpu(buffer, cpu_file))
2288 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002289 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002290 if (ent_cpu)
2291 *ent_cpu = cpu_file;
2292
2293 return ent;
2294 }
2295
Steven Rostedtab464282008-05-12 21:21:00 +02002296 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002297
2298 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002299 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002300
Steven Rostedtbc21b472010-03-31 19:49:26 -04002301 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002302
Ingo Molnarcdd31cd2008-05-12 21:20:46 +02002303 /*
2304 * Pick the entry with the smallest timestamp:
2305 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002306 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002307 next = ent;
2308 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002309 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002310 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002311 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002312 }
2313 }
2314
Steven Rostedt12b5da32012-03-27 10:43:28 -04002315 iter->ent_size = next_size;
2316
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002317 if (ent_cpu)
2318 *ent_cpu = next_cpu;
2319
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002320 if (ent_ts)
2321 *ent_ts = next_ts;
2322
Steven Rostedtbc21b472010-03-31 19:49:26 -04002323 if (missing_events)
2324 *missing_events = next_lost;
2325
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002326 return next;
2327}
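
/*
 * Merge-order example (hypothetical timestamps): if cpu0's next event
 * has ts == 2000 and cpu1's has ts == 1500, the loop above hands back
 * cpu1's entry first; repeated calls interleave the per-cpu buffers
 * into a single timestamp-ordered stream.
 */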
2328
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002329/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002330struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2331 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002332{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002333 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002334}
Ingo Molnar8c523a92008-05-12 21:20:46 +02002335
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002336/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05002337void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002338{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002339 iter->ent = __find_next_entry(iter, &iter->cpu,
2340 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02002341
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002342 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002343 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002344
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002345 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02002346}
2347
Ingo Molnare309b412008-05-12 21:20:51 +02002348static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002349{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002350 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002351 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002352}
2353
Ingo Molnare309b412008-05-12 21:20:51 +02002354static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002355{
2356 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002357 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002358 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002359
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002360 WARN_ON_ONCE(iter->leftover);
2361
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002362 (*pos)++;
2363
2364 /* can't go backwards */
2365 if (iter->idx > i)
2366 return NULL;
2367
2368 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05002369 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002370 else
2371 ent = iter;
2372
2373 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05002374 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002375
2376 iter->pos = *pos;
2377
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002378 return ent;
2379}
2380
Jason Wessel955b61e2010-08-05 09:22:23 -05002381void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002382{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002383 struct ring_buffer_event *event;
2384 struct ring_buffer_iter *buf_iter;
2385 unsigned long entries = 0;
2386 u64 ts;
2387
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002388 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002389
Steven Rostedt6d158a82012-06-27 20:46:14 -04002390 buf_iter = trace_buffer_iter(iter, cpu);
2391 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002392 return;
2393
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002394 ring_buffer_iter_reset(buf_iter);
2395
2396 /*
2397 * We could have the case with the max latency tracers
2398	 * that a reset never took place on a cpu. This is evident
2399	 * from the timestamp being before the start of the buffer.
2400 */
2401 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002402 if (ts >= iter->trace_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002403 break;
2404 entries++;
2405 ring_buffer_read(buf_iter, NULL);
2406 }
2407
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002408 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002409}
2410
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002411/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002412 * The current tracer is copied to avoid taking a global lock
2413 * all around.
2414 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002415static void *s_start(struct seq_file *m, loff_t *pos)
2416{
2417 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002418 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002419 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002420 void *p = NULL;
2421 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002422 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002423
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09002424 /*
2425 * copy the tracer to avoid using a global lock all around.
2426	 * iter->trace is a copy of current_trace; the pointer to the
2427 * name may be used instead of a strcmp(), as iter->trace->name
2428 * will point to the same string as current_trace->name.
2429 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002430 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002431 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2432 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002433 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002434
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002435#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002436 if (iter->snapshot && iter->trace->use_max_tr)
2437 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002438#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002439
2440 if (!iter->snapshot)
2441 atomic_inc(&trace_record_cmdline_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002442
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002443 if (*pos != iter->pos) {
2444 iter->ent = NULL;
2445 iter->cpu = 0;
2446 iter->idx = -1;
2447
Steven Rostedtae3b5092013-01-23 15:22:59 -05002448 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002449 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002450 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002451 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002452 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002453
Lai Jiangshanac91d852010-03-02 17:54:50 +08002454 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002455 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2456 ;
2457
2458 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002459 /*
2460 * If we overflowed the seq_file before, then we want
2461 * to just reuse the trace_seq buffer again.
2462 */
2463 if (iter->leftover)
2464 p = iter;
2465 else {
2466 l = *pos - 1;
2467 p = s_next(m, p, &l);
2468 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002469 }
2470
Lai Jiangshan4f535962009-05-18 19:35:34 +08002471 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002472 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002473 return p;
2474}
2475
2476static void s_stop(struct seq_file *m, void *p)
2477{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002478 struct trace_iterator *iter = m->private;
2479
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002480#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002481 if (iter->snapshot && iter->trace->use_max_tr)
2482 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002483#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002484
2485 if (!iter->snapshot)
2486 atomic_dec(&trace_record_cmdline_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002487
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002488 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08002489 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002490}
2491
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002492static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002493get_total_entries(struct trace_buffer *buf,
2494 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002495{
2496 unsigned long count;
2497 int cpu;
2498
2499 *total = 0;
2500 *entries = 0;
2501
2502 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002503 count = ring_buffer_entries_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002504 /*
2505 * If this buffer has skipped entries, then we hold all
2506 * entries for the trace and we need to ignore the
2507 * ones before the time stamp.
2508 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002509 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2510 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002511 /* total is the same as the entries */
2512 *total += count;
2513 } else
2514 *total += count +
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002515 ring_buffer_overrun_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002516 *entries += count;
2517 }
2518}

static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                  _------=> CPU#            \n");
	seq_puts(m, "#                 / _-----=> irqs-off        \n");
	seq_puts(m, "#                | / _----=> need-resched    \n");
	seq_puts(m, "#                || / _---=> hardirq/softirq \n");
	seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
	seq_puts(m, "#                |||| /     delay            \n");
	seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
	seq_puts(m, "#     \\   /      |||||  \\    |   /          \n");
}

static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
{
	unsigned long total;
	unsigned long entries;

	get_total_entries(buf, &total, &entries);
	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
		   entries, total, num_online_cpus());
	seq_puts(m, "#\n");
}
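
/*
 * The line emitted above renders like this (numbers are illustrative):
 *
 *	# entries-in-buffer/entries-written: 2034/987654   #P:4
 *
 * i.e. 2034 events still in the buffer out of 987654 written, with
 * four CPUs online.
 */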

static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |       |          |         |\n");
}

static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#                              _-----=> irqs-off\n");
	seq_puts(m, "#                             / _----=> need-resched\n");
	seq_puts(m, "#                            | / _---=> hardirq/softirq\n");
	seq_puts(m, "#                            || / _--=> preempt-depth\n");
	seq_puts(m, "#                            ||| /     delay\n");
	seq_puts(m, "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |       |   ||||       |         |\n");
}

void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_buffer *buf = iter->trace_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
	struct tracer *type = iter->trace;
	unsigned long entries;
	unsigned long total;
	const char *name = "preemption";

	name = type->name;

	get_total_entries(buf, &total, &entries);

	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "# -----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   buf->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "#    -----------------\n");
	seq_printf(m, "#    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid,
		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "#    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, "#  => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#  => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#\n");
	}

	seq_puts(m, "#\n");
}
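
/*
 * For reference, the banner printed above looks roughly like this
 * (all values are illustrative, not taken from a real trace):
 *
 *	# irqsoff latency trace v1.1.5 on 3.15.0
 *	# --------------------------------------------------------------------
 *	# latency: 259 us, #4/4, CPU#2 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:4)
 *	#    -----------------
 *	#    | task: ps-6143 (uid:0 nice:0 policy:0 rt_prio:0)
 *	#    -----------------
 *	#  => started at: __lock_task_sighand
 *	#  => ended at:   _raw_spin_unlock_irqrestore
 */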

static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;

	if (!(trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	if (cpumask_test_cpu(iter->cpu, iter->started))
		return;

	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
		return;

	cpumask_set_cpu(iter->cpu, iter->started);

	/* Don't print started cpu buffer for the first entry of the trace */
	if (iter->idx > 1)
		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
				 iter->cpu);
}

static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	test_cpu_buff_start(iter);

	event = ftrace_find_event(entry->type);

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
			if (!trace_print_lat_context(iter))
				goto partial;
		} else {
			if (!trace_print_context(iter))
				goto partial;
		}
	}

	if (event)
		return event->funcs->trace(iter, sym_flags, event);

	if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (!trace_seq_printf(s, "%d %d %llu ",
				      entry->pid, iter->cpu, iter->ts))
			goto partial;
	}

	event = ftrace_find_event(entry->type);
	if (event)
		return event->funcs->raw(iter, 0, event);

	if (!trace_seq_printf(s, "%d ?\n", entry->type))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
		SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
		SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
	}

	event = ftrace_find_event(entry->type);
	if (event) {
		enum print_line_t ret = event->funcs->hex(iter, 0, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	SEQ_PUT_FIELD_RET(s, newline);

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_FIELD_RET(s, entry->pid);
		SEQ_PUT_FIELD_RET(s, iter->cpu);
		SEQ_PUT_FIELD_RET(s, iter->ts);
	}

	event = ftrace_find_event(entry->type);
	return event ? event->funcs->binary(iter, 0, event) :
		TRACE_TYPE_HANDLED;
}
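
/*
 * Which of the formatters above gets used is selected by the "bin",
 * "hex" and "raw" trace options (see print_trace_line() below); the
 * default is the human-readable print_trace_fmt().  Example usage
 * (path assumes debugfs is mounted at /sys/kernel/debug):
 *
 *	echo hex > /sys/kernel/debug/tracing/trace_options
 *	echo nohex > /sys/kernel/debug/tracing/trace_options
 */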

int trace_empty(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter;
	int cpu;

	/* If we are looking at one CPU buffer, only check that one */
	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
		cpu = iter->cpu_file;
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
		return 1;
	}

	for_each_tracing_cpu(cpu) {
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
	}

	return 1;
}

/* Called with trace_event_read_lock() held. */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	enum print_line_t ret;

	if (iter->lost_events &&
	    !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
			      iter->cpu, iter->lost_events))
		return TRACE_TYPE_PARTIAL_LINE;

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_BPUTS &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bputs_msg_only(iter);

	if (iter->ent->type == TRACE_BPRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}

void trace_latency_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;

	/* print nothing if the buffers are empty */
	if (trace_empty(iter))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
		print_trace_header(m, iter);

	if (!(trace_flags & TRACE_ITER_VERBOSE))
		print_lat_help_header(m);
}

void trace_default_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;
		print_trace_header(m, iter);
		if (!(trace_flags & TRACE_ITER_VERBOSE))
			print_lat_help_header(m);
	} else {
		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
			if (trace_flags & TRACE_ITER_IRQ_INFO)
				print_func_help_header_irq(iter->trace_buffer, m);
			else
				print_func_help_header(iter->trace_buffer, m);
		}
	}
}

static void test_ftrace_alive(struct seq_file *m)
{
	if (!ftrace_is_dead())
		return;
	seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
	seq_printf(m, "#          MAY BE MISSING FUNCTION EVENTS\n");
}

#ifdef CONFIG_TRACER_MAX_TRACE
static void show_snapshot_main_help(struct seq_file *m)
{
	seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
	seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
	seq_printf(m, "#                      Takes a snapshot of the main buffer.\n");
	seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n");
	seq_printf(m, "#                      (Doesn't have to be '2', works with any number that\n");
	seq_printf(m, "#                       is not a '0' or '1')\n");
}

static void show_snapshot_percpu_help(struct seq_file *m)
{
	seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
	seq_printf(m, "#                      Takes a snapshot of the main buffer for this cpu.\n");
#else
	seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
	seq_printf(m, "#                     Must use main snapshot file to allocate.\n");
#endif
	seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
	seq_printf(m, "#                      (Doesn't have to be '2', works with any number that\n");
	seq_printf(m, "#                       is not a '0' or '1')\n");
}

static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
{
	if (iter->tr->allocated_snapshot)
		seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
	else
		seq_printf(m, "#\n# * Snapshot is freed *\n#\n");

	seq_printf(m, "# Snapshot commands:\n");
	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
		show_snapshot_main_help(m);
	else
		show_snapshot_percpu_help(m);
}
#else
/* Should never be called */
static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
#endif
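
/*
 * A typical snapshot session matching the help text above (assumes
 * CONFIG_TRACER_MAX_TRACE; the mount point is illustrative):
 *
 *	cd /sys/kernel/debug/tracing
 *	echo 1 > snapshot	# allocate and take a snapshot
 *	cat snapshot		# read the frozen events while tracing goes on
 *	echo 0 > snapshot	# clear and free the snapshot buffer
 */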

static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
			test_ftrace_alive(m);
		}
		if (iter->snapshot && trace_empty(iter))
			print_snapshot_help(m, iter);
		else if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else
			trace_default_header(m);

	} else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;

	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * Use that instead.
		 * ret is 0 if seq_file write succeeded.
		 * -1 otherwise.
		 */
		iter->leftover = ret;
	}

	return 0;
}

/*
 * Should be used after trace_array_get(); trace_types_lock
 * ensures that i_cdev was already initialized.
 */
static inline int tracing_get_cpu(struct inode *inode)
{
	if (inode->i_cdev) /* See trace_create_cpu_file() */
		return (long)inode->i_cdev - 1;
	return RING_BUFFER_ALL_CPUS;
}
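
/*
 * For illustration: the per-cpu files stash "cpu + 1" in i_cdev when
 * they are created (see trace_create_cpu_file() later in this file),
 * roughly:
 *
 *	inode->i_cdev = (void *)(cpu + 1);
 *
 * so a NULL i_cdev unambiguously means "all CPUs".  The line above is
 * a sketch of that helper, not its literal body; tracing_get_cpu()
 * simply reverses the encoding.
 */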

static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};

static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int cpu;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
	if (!iter)
		return ERR_PTR(-ENOMEM);

	iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
				    GFP_KERNEL);
	if (!iter->buffer_iter)
		goto release;

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	*iter->trace = *tr->current_trace;

	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
		goto fail;

	iter->tr = tr;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Currently only the top directory has a snapshot */
	if (tr->current_trace->print_max || snapshot)
		iter->trace_buffer = &tr->max_buffer;
	else
#endif
		iter->trace_buffer = &tr->trace_buffer;
	iter->snapshot = snapshot;
	iter->pos = -1;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* stop the trace while dumping if we are not opening "snapshot" */
	if (!iter->snapshot)
		tracing_stop_tr(tr);

	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter->buffer_iter[cpu] =
				ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		}
		ring_buffer_read_prepare_sync();
		for_each_tracing_cpu(cpu) {
			ring_buffer_read_start(iter->buffer_iter[cpu]);
			tracing_iter_reset(iter, cpu);
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		ring_buffer_read_prepare_sync();
		ring_buffer_read_start(iter->buffer_iter[cpu]);
		tracing_iter_reset(iter, cpu);
	}

	mutex_unlock(&trace_types_lock);

	return iter;

 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
release:
	seq_release_private(inode, file);
	return ERR_PTR(-ENOMEM);
}

int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}

bool tracing_is_disabled(void)
{
	return (tracing_disabled) ? true : false;
}

/*
 * Open and update trace_array ref count.
 * Must have the current trace_array passed to it.
 */
static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	filp->private_data = inode->i_private;

	return 0;
}

static int tracing_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m = file->private_data;
	struct trace_iterator *iter;
	int cpu;

	if (!(file->f_mode & FMODE_READ)) {
		trace_array_put(tr);
		return 0;
	}

	/* Writes do not use seq_file */
	iter = m->private;
	mutex_lock(&trace_types_lock);

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	if (!iter->snapshot)
		/* reenable tracing if it was previously enabled */
		tracing_start_tr(tr);

	__trace_array_put(tr);

	mutex_unlock(&trace_types_lock);

	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
	seq_release_private(inode, file);

	return 0;
}

static int tracing_release_generic_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);
	return 0;
}

static int tracing_single_release_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return single_release(inode, file);
}

static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* If this file was open for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		int cpu = tracing_get_cpu(inode);

		if (cpu == RING_BUFFER_ALL_CPUS)
			tracing_reset_online_cpus(&tr->trace_buffer);
		else
			tracing_reset(&tr->trace_buffer, cpu);
	}

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, false);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}

	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
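
/*
 * The O_TRUNC check above is what makes the usual shell idioms clear
 * the trace buffer (example usage, path is illustrative):
 *
 *	echo > /sys/kernel/debug/tracing/trace
 *	:> /sys/kernel/debug/tracing/trace
 *
 * A read-only open goes through __tracing_open(), which stops tracing
 * for as long as the file is held open (the snapshot file passes
 * snapshot == true and leaves tracing running).
 */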

/*
 * Some tracers are not suitable for instance buffers.
 * A tracer is always available for the global array (toplevel)
 * or if it explicitly states that it is.
 */
static bool
trace_ok_for_array(struct tracer *t, struct trace_array *tr)
{
	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
}

/* Find the next tracer that this trace array may use */
static struct tracer *
get_tracer_for_array(struct trace_array *tr, struct tracer *t)
{
	while (t && !trace_ok_for_array(t, tr))
		t = t->next;

	return t;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t = v;

	(*pos)++;

	if (t)
		t = get_tracer_for_array(tr, t->next);

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);

	t = get_tracer_for_array(tr, trace_types);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_printf(m, "%s", t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}

static const struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int show_traces_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	ret = seq_open(file, &show_traces_seq_ops);
	if (ret)
		return ret;

	m = file->private_data;
	m->private = tr;

	return 0;
}

static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}

loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
{
	int ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, whence);
	else
		file->f_pos = ret = 0;

	return ret;
}

static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.write		= tracing_write_stub,
	.llseek		= tracing_lseek,
	.release	= tracing_release,
};

static const struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
	.llseek		= seq_lseek,
};
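
/*
 * show_traces_fops backs the "available_tracers" file; t_show() above
 * emits the registered tracers as one space-separated line.  Sample
 * output (depends entirely on the kernel configuration):
 *
 *	# cat /sys/kernel/debug/tracing/available_tracers
 *	function_graph function nop
 */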

/*
 * The tracer itself will not take this lock, but still we want
 * to provide a consistent cpumask to user-space:
 */
static DEFINE_MUTEX(tracing_cpumask_update_lock);

/*
 * Temporary storage for the character representation of the
 * CPU bitmask (and one more byte for the newline):
 */
static char mask_str[NR_CPUS + 1];

static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	int len;

	mutex_lock(&tracing_cpumask_update_lock);

	len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
	if (count - len < 2) {
		count = -EINVAL;
		goto out_err;
	}
	len += sprintf(mask_str + len, "\n");
	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);

out_err:
	mutex_unlock(&tracing_cpumask_update_lock);

	return count;
}

static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	cpumask_var_t tracing_cpumask_new;
	int err, cpu;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	mutex_lock(&tracing_cpumask_update_lock);

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
		}
	}
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);

	mutex_unlock(&tracing_cpumask_update_lock);
	free_cpumask_var(tracing_cpumask_new);

	return count;

err_unlock:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}
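
/*
 * Example interaction with the "tracing_cpumask" file served by the
 * two handlers above (mask values are illustrative, for a 4-CPU box):
 *
 *	# cat /sys/kernel/debug/tracing/tracing_cpumask
 *	f
 *	# echo 3 > /sys/kernel/debug/tracing/tracing_cpumask	# CPUs 0-1 only
 */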

static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
	.release	= tracing_release_generic_tr,
	.llseek		= generic_file_llseek,
};

static int tracing_trace_options_show(struct seq_file *m, void *v)
{
	struct tracer_opt *trace_opts;
	struct trace_array *tr = m->private;
	u32 tracer_flags;
	int i;

	mutex_lock(&trace_types_lock);
	tracer_flags = tr->current_trace->flags->val;
	trace_opts = tr->current_trace->flags->opts;

	for (i = 0; trace_options[i]; i++) {
		if (trace_flags & (1 << i))
			seq_printf(m, "%s\n", trace_options[i]);
		else
			seq_printf(m, "no%s\n", trace_options[i]);
	}

	for (i = 0; trace_opts[i].name; i++) {
		if (tracer_flags & trace_opts[i].bit)
			seq_printf(m, "%s\n", trace_opts[i].name);
		else
			seq_printf(m, "no%s\n", trace_opts[i].name);
	}
	mutex_unlock(&trace_types_lock);

	return 0;
}

static int __set_tracer_option(struct trace_array *tr,
			       struct tracer_flags *tracer_flags,
			       struct tracer_opt *opts, int neg)
{
	struct tracer *trace = tr->current_trace;
	int ret;

	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
	if (ret)
		return ret;

	if (neg)
		tracer_flags->val &= ~opts->bit;
	else
		tracer_flags->val |= opts->bit;
	return 0;
}

/* Try to assign a tracer specific option */
static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
{
	struct tracer *trace = tr->current_trace;
	struct tracer_flags *tracer_flags = trace->flags;
	struct tracer_opt *opts = NULL;
	int i;

	for (i = 0; tracer_flags->opts[i].name; i++) {
		opts = &tracer_flags->opts[i];

		if (strcmp(cmp, opts->name) == 0)
			return __set_tracer_option(tr, trace->flags, opts, neg);
	}

	return -EINVAL;
}

/* Some tracers require overwrite to stay enabled */
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
{
	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
		return -1;

	return 0;
}

int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
	/* do nothing if flag is already set */
	if (!!(trace_flags & mask) == !!enabled)
		return 0;

	/* Give the tracer a chance to approve the change */
	if (tr->current_trace->flag_changed)
		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
			return -EINVAL;

	if (enabled)
		trace_flags |= mask;
	else
		trace_flags &= ~mask;

	if (mask == TRACE_ITER_RECORD_CMD)
		trace_event_enable_cmd_record(enabled);

	if (mask == TRACE_ITER_OVERWRITE) {
		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
#ifdef CONFIG_TRACER_MAX_TRACE
		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
#endif
	}

	if (mask == TRACE_ITER_PRINTK)
		trace_printk_start_stop_comm(enabled);

	return 0;
}

static int trace_set_options(struct trace_array *tr, char *option)
{
	char *cmp;
	int neg = 0;
	int ret = -ENODEV;
	int i;

	cmp = strstrip(option);

	if (strncmp(cmp, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	mutex_lock(&trace_types_lock);

	for (i = 0; trace_options[i]; i++) {
		if (strcmp(cmp, trace_options[i]) == 0) {
			ret = set_tracer_flag(tr, 1 << i, !neg);
			break;
		}
	}

	/* If no option could be set, test the specific tracer options */
	if (!trace_options[i])
		ret = set_tracer_option(tr, cmp, neg);

	mutex_unlock(&trace_types_lock);

	return ret;
}
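
/*
 * trace_set_options() implements the "no" prefix used both by the
 * trace_options file and the trace_options= boot parameter.  Example
 * with a generic option (path is illustrative):
 *
 *	echo sym-offset > /sys/kernel/debug/tracing/trace_options
 *	echo nosym-offset > /sys/kernel/debug/tracing/trace_options
 */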
3564
3565static ssize_t
3566tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3567 size_t cnt, loff_t *ppos)
3568{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003569 struct seq_file *m = filp->private_data;
3570 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003571 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003572 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003573
3574 if (cnt >= sizeof(buf))
3575 return -EINVAL;
3576
3577 if (copy_from_user(&buf, ubuf, cnt))
3578 return -EFAULT;
3579
Steven Rostedta8dd2172013-01-09 20:54:17 -05003580 buf[cnt] = 0;
3581
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003582 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003583 if (ret < 0)
3584 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003585
Jiri Olsacf8517c2009-10-23 19:36:16 -04003586 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003587
3588 return cnt;
3589}
3590
Li Zefanfdb372e2009-12-08 11:15:59 +08003591static int tracing_trace_options_open(struct inode *inode, struct file *file)
3592{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003593 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003594 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003595
Li Zefanfdb372e2009-12-08 11:15:59 +08003596 if (tracing_disabled)
3597 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003598
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003599 if (trace_array_get(tr) < 0)
3600 return -ENODEV;
3601
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003602 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3603 if (ret < 0)
3604 trace_array_put(tr);
3605
3606 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08003607}
3608
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003609static const struct file_operations tracing_iter_fops = {
Li Zefanfdb372e2009-12-08 11:15:59 +08003610 .open = tracing_trace_options_open,
3611 .read = seq_read,
3612 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003613 .release = tracing_single_release_tr,
Steven Rostedtee6bce52008-11-12 17:52:37 -05003614 .write = tracing_trace_options_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003615};
3616
static const char readme_msg[] =
	"tracing mini-HOWTO:\n\n"
	"# echo 0 > tracing_on : quick way to disable tracing\n"
	"# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
	" Important files:\n"
	"  trace\t\t\t- The static contents of the buffer\n"
	"\t\t\t  To clear the buffer write into this file: echo > trace\n"
	"  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
	"  current_tracer\t- function and latency tracers\n"
	"  available_tracers\t- list of configured tracers for current_tracer\n"
	"  buffer_size_kb\t- view and modify size of per cpu buffer\n"
	"  buffer_total_size_kb - view total size of all cpu buffers\n\n"
	"  trace_clock\t\t- change the clock used to order events\n"
	"       local:   Per cpu clock but may not be synced across CPUs\n"
	"      global:   Synced across CPUs but slows tracing down.\n"
	"     counter:   Not a clock, but just an increment\n"
	"      uptime:   Jiffy counter from time of boot\n"
	"        perf:   Same clock that perf events use\n"
#ifdef CONFIG_X86_64
	"     x86-tsc:   TSC cycle counter\n"
#endif
	"\n  trace_marker\t\t- Writes into this file are written into the kernel buffer\n"
	"  tracing_cpumask\t- Limit which CPUs to trace\n"
	"  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
	"\t\t\t  Remove sub-buffer with rmdir\n"
	"  trace_options\t\t- Set format or modify how tracing happens\n"
	"\t\t\t  Disable an option by adding a suffix 'no' to the\n"
	"\t\t\t  option name\n"
	"  saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	"\n  available_filter_functions - list of functions that can be filtered on\n"
	"  set_ftrace_filter\t- echo function name in here to only trace these\n"
	"\t\t\t  functions\n"
	"\t     accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
	"\t     modules: Can select a group via module\n"
	"\t      Format: :mod:<module-name>\n"
	"\t     example: echo :mod:ext3 > set_ftrace_filter\n"
	"\t    triggers: a command to perform when function is hit\n"
	"\t      Format: <function>:<trigger>[:count]\n"
	"\t     trigger: traceon, traceoff\n"
	"\t\t      enable_event:<system>:<event>\n"
	"\t\t      disable_event:<system>:<event>\n"
#ifdef CONFIG_STACKTRACE
	"\t\t      stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t      snapshot\n"
#endif
	"\t\t      dump\n"
	"\t\t      cpudump\n"
	"\t     example: echo do_fault:traceoff > set_ftrace_filter\n"
	"\t              echo do_trap:traceoff:3 > set_ftrace_filter\n"
	"\t     The first one will disable tracing every time do_fault is hit\n"
	"\t     The second will disable tracing at most 3 times when do_trap is hit\n"
	"\t     The first time do_trap is hit and it disables tracing, the\n"
	"\t     counter will decrement to 2. If tracing is already disabled,\n"
	"\t     the counter will not decrement. It only decrements when the\n"
	"\t     trigger did work\n"
	"\t     To remove trigger without count:\n"
	"\t       echo '!<function>:<trigger>' > set_ftrace_filter\n"
	"\t     To remove trigger with a count:\n"
	"\t       echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
	"  set_ftrace_notrace\t- echo function name in here to never trace.\n"
	"\t    accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
	"\t    modules: Can select a group via module command :mod:\n"
	"\t    Does not accept triggers\n"
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_TRACER
	"  set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
	"\t\t    (function)\n"
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	"  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
	"  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\n  snapshot\t\t- Like 'trace' but shows the content of the static\n"
	"\t\t\t  snapshot buffer. Read the contents for more\n"
	"\t\t\t  information\n"
#endif
#ifdef CONFIG_STACK_TRACER
	"  stack_trace\t\t- Shows the max stack trace when active\n"
	"  stack_max_size\t- Shows current max stack size that was traced\n"
	"\t\t\t  Write into this file to reset the max size (trigger a\n"
	"\t\t\t  new trace)\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	"  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
	"\t\t\t  traces\n"
#endif
#endif /* CONFIG_STACK_TRACER */
	"  events/\t\t- Directory containing all trace event subsystems:\n"
	"      enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
	"  events/<system>/\t- Directory containing all trace events for <system>:\n"
	"      enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
	"\t\t\t  events\n"
	"      filter\t\t- If set, only events passing filter are traced\n"
	"  events/<system>/<event>/\t- Directory containing control files for\n"
	"\t\t\t  <event>:\n"
	"      enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
	"      filter\t\t- If set, only events passing filter are traced\n"
	"      trigger\t\t- If set, a command to perform when event is hit\n"
	"\t    Format: <trigger>[:count][if <filter>]\n"
	"\t   trigger: traceon, traceoff\n"
	"\t            enable_event:<system>:<event>\n"
	"\t            disable_event:<system>:<event>\n"
#ifdef CONFIG_STACKTRACE
	"\t\t    stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t    snapshot\n"
#endif
	"\t   example: echo traceoff > events/block/block_unplug/trigger\n"
	"\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
	"\t            echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
	"\t            events/block/block_unplug/trigger\n"
	"\t   The first disables tracing every time block_unplug is hit.\n"
	"\t   The second disables tracing the first 3 times block_unplug is hit.\n"
	"\t   The third enables the kmalloc event the first 3 times block_unplug\n"
	"\t     is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
	"\t   Like function triggers, the counter is only decremented if it\n"
	"\t    enabled or disabled tracing.\n"
	"\t   To remove a trigger without a count:\n"
	"\t     echo '!<trigger>' > <system>/<event>/trigger\n"
	"\t   To remove a trigger with a count:\n"
	"\t     echo '!<trigger>:0' > <system>/<event>/trigger\n"
	"\t   Filters can be ignored when removing a trigger.\n"
;
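
/*
 * A minimal end-to-end session assembled from the HOWTO above
 * (editor's illustration; paths relative to the tracing directory):
 *
 *	echo 0 > tracing_on			# quiesce
 *	echo function > current_tracer		# pick a tracer
 *	echo do_fault > set_ftrace_filter	# narrow the function set
 *	echo 1 > tracing_on			# start
 *	cat trace				# static view of the buffer
 *	echo > trace				# clear the buffer
 */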

static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
					readme_msg, strlen(readme_msg));
}

static const struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
	.llseek		= generic_file_llseek,
};

static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned int *ptr = v;

	if (*pos || m->count)
		ptr++;

	(*pos)++;

	for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
	     ptr++) {
		if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
			continue;

		return ptr;
	}

	return NULL;
}

static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
{
	void *v;
	loff_t l = 0;

	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	v = &savedcmd->map_cmdline_to_pid[0];
	while (l <= *pos) {
		v = saved_cmdlines_next(m, v, &l);
		if (!v)
			return NULL;
	}

	return v;
}

static void saved_cmdlines_stop(struct seq_file *m, void *v)
{
	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

static int saved_cmdlines_show(struct seq_file *m, void *v)
{
	char buf[TASK_COMM_LEN];
	unsigned int *pid = v;

	__trace_find_cmdline(*pid, buf);
	seq_printf(m, "%d %s\n", *pid, buf);
	return 0;
}

static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
	.start		= saved_cmdlines_start,
	.next		= saved_cmdlines_next,
	.stop		= saved_cmdlines_stop,
	.show		= saved_cmdlines_show,
};

static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
}

static const struct file_operations tracing_saved_cmdlines_fops = {
	.open		= tracing_saved_cmdlines_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
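
/*
 * Reading saved_cmdlines walks the pid map above; each line emitted by
 * saved_cmdlines_show() is "<pid> <comm>". Sample output (editor's
 * illustration, pids and comms invented):
 *
 *	$ cat saved_cmdlines
 *	1 systemd
 *	87 kworker/0:1
 */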

static ssize_t
tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
				 size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	arch_spin_lock(&trace_cmdline_lock);
	r = sprintf(buf, "%u\n", savedcmd->cmdline_num);
	arch_spin_unlock(&trace_cmdline_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
{
	kfree(s->saved_cmdlines);
	kfree(s->map_cmdline_to_pid);
	kfree(s);
}

static int tracing_resize_saved_cmdlines(unsigned int val)
{
	struct saved_cmdlines_buffer *s, *savedcmd_temp;

	s = kmalloc(sizeof(struct saved_cmdlines_buffer), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	if (allocate_cmdlines_buffer(val, s) < 0) {
		kfree(s);
		return -ENOMEM;
	}

	arch_spin_lock(&trace_cmdline_lock);
	savedcmd_temp = savedcmd;
	savedcmd = s;
	arch_spin_unlock(&trace_cmdline_lock);
	free_saved_cmdlines_buffer(savedcmd_temp);

	return 0;
}

static ssize_t
tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
				  size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry and no more than PID_MAX_DEFAULT */
	if (!val || val > PID_MAX_DEFAULT)
		return -EINVAL;

	ret = tracing_resize_saved_cmdlines((unsigned int)val);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations tracing_saved_cmdlines_size_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_saved_cmdlines_size_read,
	.write		= tracing_saved_cmdlines_size_write,
};
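
/*
 * Illustrative use of the size knob above (editor's example): writes
 * go through tracing_saved_cmdlines_size_write(), which accepts
 * 1..PID_MAX_DEFAULT and reallocates the map via
 * tracing_resize_saved_cmdlines():
 *
 *	cat saved_cmdlines_size		# current number of slots
 *	echo 1024 > saved_cmdlines_size	# grow the comm-pid map
 */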

static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+2];
	int r;

	mutex_lock(&trace_types_lock);
	r = sprintf(buf, "%s\n", tr->current_trace->name);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

int tracer_init(struct tracer *t, struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
	return t->init(tr);
}

static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
{
	int cpu;

	for_each_tracing_cpu(cpu)
		per_cpu_ptr(buf->data, cpu)->entries = val;
}

#ifdef CONFIG_TRACER_MAX_TRACE
/* resize @trace_buf's buffer to the size of @size_buf's entries */
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id)
{
	int cpu, ret = 0;

	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
			if (ret < 0)
				break;
			per_cpu_ptr(trace_buf->data, cpu)->entries =
				per_cpu_ptr(size_buf->data, cpu)->entries;
		}
	} else {
		ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
		if (ret == 0)
			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
				per_cpu_ptr(size_buf->data, cpu_id)->entries;
	}

	return ret;
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int __tracing_resize_ring_buffer(struct trace_array *tr,
					unsigned long size, int cpu)
{
	int ret;

	/*
	 * If kernel or user changes the size of the ring buffer
	 * we use the size that was given, and we can forget about
	 * expanding it later.
	 */
	ring_buffer_expanded = true;

	/* May be called before buffers are initialized */
	if (!tr->trace_buffer.buffer)
		return 0;

	ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
	if (ret < 0)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
	    !tr->current_trace->use_max_tr)
		goto out;

	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
	if (ret < 0) {
		int r = resize_buffer_duplicate_size(&tr->trace_buffer,
						     &tr->trace_buffer, cpu);
		if (r < 0) {
			/*
			 * AARGH! We are left with different
			 * size max buffer!!!!
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the
			 * latency tracers), it swaps the max buffer
			 * with the saved snapshot. We succeeded in
			 * updating the size of the main buffer, but
			 * failed to update the size of the max buffer.
			 * But when we tried to reset the main buffer to
			 * the original size, we failed there too. This
			 * is very unlikely to happen, but if it does,
			 * warn and kill all tracing.
			 */
			WARN_ON(1);
			tracing_disabled = 1;
		}
		return ret;
	}

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->max_buffer, size);
	else
		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;

 out:
#endif /* CONFIG_TRACER_MAX_TRACE */

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->trace_buffer, size);
	else
		per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;

	return ret;
}

static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
					  unsigned long size, int cpu_id)
{
	int ret = size;

	mutex_lock(&trace_types_lock);

	if (cpu_id != RING_BUFFER_ALL_CPUS) {
		/* make sure this cpu is enabled in the mask */
		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
			ret = -EINVAL;
			goto out;
		}
	}

	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
	if (ret < 0)
		ret = -ENOMEM;

out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

/**
 * tracing_update_buffers - used by tracing facility to expand ring buffers
 *
 * To save memory when tracing is configured in but never used, the
 * ring buffers start at a minimum size. Once a user starts to use the
 * tracing facility, they need to grow to their default size.
 *
 * This function is to be called when a tracer is about to be used.
 */
int tracing_update_buffers(void)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded)
		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
	mutex_unlock(&trace_types_lock);

	return ret;
}
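
/*
 * Editor's sketch of the expected call pattern (an assumption, not a
 * verbatim caller from this file: callers sit on a path that is about
 * to start tracing, e.g. an event-enable write handler):
 *
 *	ret = tracing_update_buffers();
 *	if (ret < 0)
 *		return ret;
 *	// the ring buffers are now at full size; safe to enable
 */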

struct trace_option_dentry;

static struct trace_option_dentry *
create_trace_option_files(struct trace_array *tr, struct tracer *tracer);

static void
destroy_trace_option_files(struct trace_option_dentry *topts);

/*
 * Used to clear out the tracer before deletion of an instance.
 * Must have trace_types_lock held.
 */
static void tracing_set_nop(struct trace_array *tr)
{
	if (tr->current_trace == &nop_trace)
		return;

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	tr->current_trace = &nop_trace;
}

static int tracing_set_tracer(struct trace_array *tr, const char *buf)
{
	static struct trace_option_dentry *topts;
	struct tracer *t;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool had_max_tr;
#endif
	int ret = 0;

	mutex_lock(&trace_types_lock);

	if (!ring_buffer_expanded) {
		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			goto out;
		ret = 0;
	}

	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (t == tr->current_trace)
		goto out;

	/* Some tracers are only allowed for the top level buffer */
	if (!trace_ok_for_array(t, tr)) {
		ret = -EINVAL;
		goto out;
	}

	trace_branch_disable();

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	/* Current trace needs to be nop_trace before synchronize_sched */
	tr->current_trace = &nop_trace;

#ifdef CONFIG_TRACER_MAX_TRACE
	had_max_tr = tr->allocated_snapshot;

	if (had_max_tr && !t->use_max_tr) {
		/*
		 * We need to make sure that the update_max_tr sees that
		 * current_trace changed to nop_trace to keep it from
		 * swapping the buffers after we resize it.
		 * The update_max_tr is called with interrupts disabled,
		 * so a synchronize_sched() is sufficient.
		 */
		synchronize_sched();
		free_snapshot(tr);
	}
#endif
	/* Currently, only the top instance has options */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
		destroy_trace_option_files(topts);
		topts = create_trace_option_files(tr, t);
	}

#ifdef CONFIG_TRACER_MAX_TRACE
	if (t->use_max_tr && !had_max_tr) {
		ret = alloc_snapshot(tr);
		if (ret < 0)
			goto out;
	}
#endif

	if (t->init) {
		ret = tracer_init(t, tr);
		if (ret)
			goto out;
	}

	tr->current_trace = t;
	tr->current_trace->enabled++;
	trace_branch_enable(tr);
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+1];
	int i;
	size_t ret;
	int err;

	ret = cnt;

	if (cnt > MAX_TRACER_SIZE)
		cnt = MAX_TRACER_SIZE;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip ending whitespace. */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	err = tracing_set_tracer(tr, buf);
	if (err)
		return err;

	*ppos += ret;

	return ret;
}
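
/*
 * Illustrative round trip through the two handlers above (editor's
 * example): the write strips the trailing newline that echo appends,
 * so the name matches a registered tracer exactly:
 *
 *	cat available_tracers		# e.g. "function_graph function nop"
 *	echo function_graph > current_tracer
 *	cat current_tracer		# -> "function_graph"
 */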

static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n",
		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	*ptr = val * 1000;

	return cnt;
}
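
/*
 * Units note (editor's addition): tracing_max_lat_read() reports
 * microseconds via nsecs_to_usecs(), and tracing_max_lat_write()
 * stores val * 1000, so writes are in microseconds as well:
 *
 *	echo 0 > tracing_max_latency	# reset the recorded maximum
 */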

static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	mutex_lock(&trace_types_lock);

	/* create a buffer to store the information to pass to userspace */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		ret = -ENOMEM;
		__trace_array_put(tr);
		goto out;
	}

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace) {
		ret = -ENOMEM;
		goto fail;
	}
	*iter->trace = *tr->current_trace;

	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto fail;
	}

	/* trace pipe does not show start of buffer */
	cpumask_setall(iter->started);

	if (trace_flags & TRACE_ITER_LATENCY_FMT)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	iter->tr = tr;
	iter->trace_buffer = &tr->trace_buffer;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);
	filp->private_data = iter;

	if (iter->trace->pipe_open)
		iter->trace->pipe_open(iter);

	nonseekable_open(inode, filp);
out:
	mutex_unlock(&trace_types_lock);
	return ret;

fail:
	kfree(iter->trace);
	kfree(iter);
	__trace_array_put(tr);
	mutex_unlock(&trace_types_lock);
	return ret;
}

static int tracing_release_pipe(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter = file->private_data;
	struct trace_array *tr = inode->i_private;

	mutex_lock(&trace_types_lock);

	if (iter->trace->pipe_close)
		iter->trace->pipe_close(iter);

	mutex_unlock(&trace_types_lock);

	free_cpumask_var(iter->started);
	mutex_destroy(&iter->mutex);
	kfree(iter->trace);
	kfree(iter);

	trace_array_put(tr);

	return 0;
}

static unsigned int
trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return POLLIN | POLLRDNORM;

	if (trace_flags & TRACE_ITER_BLOCK)
		/*
		 * Always select as readable when in blocking mode
		 */
		return POLLIN | POLLRDNORM;
	else
		return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
					     filp, poll_table);
}

static unsigned int
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
	struct trace_iterator *iter = filp->private_data;

	return trace_poll(iter, filp, poll_table);
}
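
/*
 * Editor's sketch (illustrative, not from the original source) of a
 * userspace consumer driving the poll hook above on a trace_pipe fd:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);	// returns when trace data is readable
 */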

/* Must be called with trace_types_lock mutex held. */
static int tracing_wait_pipe(struct file *filp)
{
	struct trace_iterator *iter = filp->private_data;

	while (trace_empty(iter)) {

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		/*
		 * We stop blocking only once we have read something and
		 * tracing has been disabled. We still block if tracing is
		 * disabled but we have never read anything. This allows a
		 * user to cat this file, and then enable tracing. But
		 * after we have read something, we give an EOF when
		 * tracing is again disabled.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracing_is_on() && iter->pos)
			break;

		mutex_unlock(&iter->mutex);

		wait_on_pipe(iter);

		mutex_lock(&iter->mutex);

		if (signal_pending(current))
			return -EINTR;
	}

	return 1;
}

/*
 * Consumer reader.
 */
static ssize_t
tracing_read_pipe(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_iterator *iter = filp->private_data;
	struct trace_array *tr = iter->tr;
	ssize_t sret;

	/* return any leftover data */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (sret != -EBUSY)
		return sret;

	trace_seq_init(&iter->seq);

	/* copy the tracer to avoid using a global lock all around */
	mutex_lock(&trace_types_lock);
	if (unlikely(iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

	/*
	 * Avoid more than one consumer on a single file descriptor.
	 * This is just a matter of trace coherency; the ring buffer
	 * itself is protected.
	 */
	mutex_lock(&iter->mutex);
	if (iter->trace->read) {
		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
		if (sret)
			goto out;
	}

waitagain:
	sret = tracing_wait_pipe(filp);
	if (sret <= 0)
		goto out;

	/* stop when tracing is finished */
	if (trace_empty(iter)) {
		sret = 0;
		goto out;
	}

	if (cnt >= PAGE_SIZE)
		cnt = PAGE_SIZE - 1;

	/* reset all but tr, trace, and overruns */
	memset(&iter->seq, 0,
	       sizeof(struct trace_iterator) -
	       offsetof(struct trace_iterator, seq));
	cpumask_clear(iter->started);
	iter->pos = -1;

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);
	while (trace_find_next_entry_inc(iter) != NULL) {
		enum print_line_t ret;
		int len = iter->seq.len;

		ret = print_trace_line(iter);
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			/* don't print partial lines */
			iter->seq.len = len;
			break;
		}
		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);

		if (iter->seq.len >= cnt)
			break;

		/*
		 * Setting the full flag means we reached the trace_seq buffer
		 * size and we should leave by partial output condition above.
		 * One of the trace_seq_* functions is not used properly.
		 */
		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
			  iter->ent->type);
	}
	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();

	/* Now copy what we have to the user */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (iter->seq.readpos >= iter->seq.len)
		trace_seq_init(&iter->seq);

	/*
	 * If there was nothing to send to user, in spite of consuming trace
	 * entries, go back to wait for more entries.
	 */
	if (sret == -EBUSY)
		goto waitagain;

out:
	mutex_unlock(&iter->mutex);

	return sret;
}
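
/*
 * Illustrative consuming read (editor's example): unlike "trace",
 * trace_pipe removes entries as they are read and, per
 * tracing_wait_pipe() above, blocks when the buffer is empty unless
 * the descriptor was opened O_NONBLOCK:
 *
 *	cat trace_pipe		# streams entries, consuming them
 */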

static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
				     unsigned int idx)
{
	__free_page(spd->pages[idx]);
}

static const struct pipe_buf_operations tracing_pipe_buf_ops = {
	.can_merge		= 0,
	.confirm		= generic_pipe_buf_confirm,
	.release		= generic_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= generic_pipe_buf_get,
};

static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
	size_t count;
	int ret;

	/* Seq buffer is page-sized, exactly what we need. */
	for (;;) {
		count = iter->seq.len;
		ret = print_trace_line(iter);
		count = iter->seq.len - count;
		if (rem < count) {
			rem = 0;
			iter->seq.len -= count;
			break;
		}
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			iter->seq.len -= count;
			break;
		}

		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);
		rem -= count;
		if (!trace_find_next_entry_inc(iter)) {
			rem = 0;
			iter->ent = NULL;
			break;
		}
	}

	return rem;
}

static ssize_t tracing_splice_read_pipe(struct file *filp,
					loff_t *ppos,
					struct pipe_inode_info *pipe,
					size_t len,
					unsigned int flags)
{
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct trace_iterator *iter = filp->private_data;
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages	= 0, /* This gets updated below. */
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &tracing_pipe_buf_ops,
		.spd_release	= tracing_spd_release_pipe,
	};
	struct trace_array *tr = iter->tr;
	ssize_t ret;
	size_t rem;
	unsigned int i;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	/* copy the tracer to avoid using a global lock all around */
	mutex_lock(&trace_types_lock);
	if (unlikely(iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

	mutex_lock(&iter->mutex);

	if (iter->trace->splice_read) {
		ret = iter->trace->splice_read(iter, filp,
					       ppos, pipe, len, flags);
		if (ret)
			goto out_err;
	}

	ret = tracing_wait_pipe(filp);
	if (ret <= 0)
		goto out_err;

	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
		ret = -EFAULT;
		goto out_err;
	}

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);

	/* Fill as many pages as possible. */
	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
		spd.pages[i] = alloc_page(GFP_KERNEL);
		if (!spd.pages[i])
			break;

		rem = tracing_fill_pipe_page(rem, iter);

		/* Copy the data into the page, so we can start over. */
		ret = trace_seq_to_buffer(&iter->seq,
					  page_address(spd.pages[i]),
					  iter->seq.len);
		if (ret < 0) {
			__free_page(spd.pages[i]);
			break;
		}
		spd.partial[i].offset = 0;
		spd.partial[i].len = iter->seq.len;

		trace_seq_init(&iter->seq);
	}

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
	mutex_unlock(&iter->mutex);

	spd.nr_pages = i;

	ret = splice_to_pipe(pipe, &spd);
out:
	splice_shrink_spd(&spd);
	return ret;

out_err:
	mutex_unlock(&iter->mutex);
	goto out;
}
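
/*
 * Editor's sketch of a zero-copy consumer for the splice hook above
 * (illustrative userspace snippet; error handling omitted):
 *
 *	int fd = open("trace_pipe", O_RDONLY);
 *	int p[2];
 *	pipe(p);
 *	splice(fd, NULL, p[1], NULL, 65536, 0);
 */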

static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	int cpu = tracing_get_cpu(inode);
	char buf[64];
	int r = 0;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

	if (cpu == RING_BUFFER_ALL_CPUS) {
		int cpu, buf_size_same;
		unsigned long size;

		size = 0;
		buf_size_same = 1;
		/* check if all cpu sizes are same */
		for_each_tracing_cpu(cpu) {
			/* fill in the size from first enabled cpu */
			if (size == 0)
				size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
			if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
				buf_size_same = 0;
				break;
			}
		}

		if (buf_size_same) {
			if (!ring_buffer_expanded)
				r = sprintf(buf, "%lu (expanded: %lu)\n",
					    size >> 10,
					    trace_buf_size >> 10);
			else
				r = sprintf(buf, "%lu\n", size >> 10);
		} else
			r = sprintf(buf, "X\n");
	} else
		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);

	mutex_unlock(&trace_types_lock);

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
	return ret;
}

static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry */
	if (!val)
		return -EINVAL;

	/* value is in KB */
	val <<= 10;
	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}
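
/*
 * Illustrative resize (editor's example): values are in KB, as the
 * write handler shifts by 10 before resizing. A write to the top-level
 * file resizes every CPU buffer; a per-CPU file (the usual
 * per_cpu/cpuX/ layout, an assumption here) resizes just one:
 *
 *	echo 2048 > buffer_size_kb
 *	echo 1024 > per_cpu/cpu0/buffer_size_kb
 */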

static ssize_t
tracing_total_entries_read(struct file *filp, char __user *ubuf,
			   size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r, cpu;
	unsigned long size = 0, expanded_size = 0;

	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
		size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
		if (!ring_buffer_expanded)
			expanded_size += trace_buf_size >> 10;
	}
	if (ring_buffer_expanded)
		r = sprintf(buf, "%lu\n", size);
	else
		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	/*
	 * There is no need to read what the user has written, this function
	 * is just to make sure that there is no error when "echo" is used
	 */

	*ppos += cnt;

	return cnt;
}

static int
tracing_free_buffer_release(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	/* disable tracing ? */
	if (trace_flags & TRACE_ITER_STOP_ON_FREE)
		tracer_tracing_off(tr);
	/* resize the ring buffer to 0 */
	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);

	trace_array_put(tr);

	return 0;
}

Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004786static ssize_t
4787tracing_mark_write(struct file *filp, const char __user *ubuf,
4788 size_t cnt, loff_t *fpos)
4789{
Steven Rostedtd696b582011-09-22 11:50:27 -04004790 unsigned long addr = (unsigned long)ubuf;
Alexander Z Lam2d716192013-07-01 15:31:24 -07004791 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04004792 struct ring_buffer_event *event;
4793 struct ring_buffer *buffer;
4794 struct print_entry *entry;
4795 unsigned long irq_flags;
4796 struct page *pages[2];
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004797 void *map_page[2];
Steven Rostedtd696b582011-09-22 11:50:27 -04004798 int nr_pages = 1;
4799 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04004800 int offset;
4801 int size;
4802 int len;
4803 int ret;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004804 int i;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004805
Steven Rostedtc76f0692008-11-07 22:36:02 -05004806 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004807 return -EINVAL;
4808
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07004809 if (!(trace_flags & TRACE_ITER_MARKERS))
4810 return -EINVAL;
4811
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004812 if (cnt > TRACE_BUF_SIZE)
4813 cnt = TRACE_BUF_SIZE;
4814
Steven Rostedtd696b582011-09-22 11:50:27 -04004815 /*
4816 * Userspace is injecting traces into the kernel trace buffer.
4817 * We want to be as non intrusive as possible.
4818 * To do so, we do not want to allocate any special buffers
4819 * or take any locks, but instead write the userspace data
4820 * straight into the ring buffer.
4821 *
4822 * First we need to pin the userspace buffer into memory,
4823 * which, most likely it is, because it just referenced it.
4824 * But there's no guarantee that it is. By using get_user_pages_fast()
4825 * and kmap_atomic/kunmap_atomic() we can get access to the
4826 * pages directly. We then write the data directly into the
4827 * ring buffer.
4828 */
4829 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004830
Steven Rostedtd696b582011-09-22 11:50:27 -04004831 /* check if we cross pages */
4832 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
4833 nr_pages = 2;
4834
4835 offset = addr & (PAGE_SIZE - 1);
4836 addr &= PAGE_MASK;
4837
4838 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
4839 if (ret < nr_pages) {
4840 while (--ret >= 0)
4841 put_page(pages[ret]);
4842 written = -EFAULT;
4843 goto out;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004844 }
4845
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004846 for (i = 0; i < nr_pages; i++)
4847 map_page[i] = kmap_atomic(pages[i]);
Steven Rostedtd696b582011-09-22 11:50:27 -04004848
4849 local_save_flags(irq_flags);
4850 size = sizeof(*entry) + cnt + 2; /* possible \n added */
Alexander Z Lam2d716192013-07-01 15:31:24 -07004851 buffer = tr->trace_buffer.buffer;
Steven Rostedtd696b582011-09-22 11:50:27 -04004852 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
4853 irq_flags, preempt_count());
4854 if (!event) {
4855 /* Ring buffer disabled, return as if not open for write */
4856 written = -EBADF;
4857 goto out_unlock;
4858 }
4859
4860 entry = ring_buffer_event_data(event);
4861 entry->ip = _THIS_IP_;
4862
4863 if (nr_pages == 2) {
4864 len = PAGE_SIZE - offset;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004865 memcpy(&entry->buf, map_page[0] + offset, len);
4866 memcpy(&entry->buf[len], map_page[1], cnt - len);
Steven Rostedtd696b582011-09-22 11:50:27 -04004867 } else
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004868 memcpy(&entry->buf, map_page[0] + offset, cnt);
Steven Rostedtd696b582011-09-22 11:50:27 -04004869
4870 if (entry->buf[cnt - 1] != '\n') {
4871 entry->buf[cnt] = '\n';
4872 entry->buf[cnt + 1] = '\0';
4873 } else
4874 entry->buf[cnt] = '\0';
4875
Steven Rostedt7ffbd482012-10-11 12:14:25 -04004876 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04004877
4878 written = cnt;
4879
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02004880 *fpos += written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004881
Steven Rostedtd696b582011-09-22 11:50:27 -04004882 out_unlock:
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004883 for (i = 0; i < nr_pages; i++){
4884 kunmap_atomic(map_page[i]);
4885 put_page(pages[i]);
4886 }
Steven Rostedtd696b582011-09-22 11:50:27 -04004887 out:
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02004888 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004889}
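
/*
 * Usage sketch for the trace_marker file this handler backs (the path
 * assumes debugfs mounted at /sys/kernel/debug):
 *
 *	# echo "hello from user space" > /sys/kernel/debug/tracing/trace_marker
 *
 * The write lands in the ring buffer as a TRACE_PRINT event, so it shows
 * up interleaved with kernel events in the trace output. Writes larger
 * than TRACE_BUF_SIZE are silently truncated, per the clamp at the top
 * of tracing_mark_write().
 */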

static int tracing_clock_show(struct seq_file *m, void *v)
{
	struct trace_array *tr = m->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
		seq_printf(m,
			"%s%s%s%s", i ? " " : "",
			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
			i == tr->clock_id ? "]" : "");
	seq_putc(m, '\n');

	return 0;
}

static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
		if (strcmp(trace_clocks[i].name, clockstr) == 0)
			break;
	}
	if (i == ARRAY_SIZE(trace_clocks))
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	tr->clock_id = i;

	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);

	/*
	 * New clock may not be consistent with the previous clock.
	 * Reset the buffer so that it doesn't have incomparable timestamps.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
	tracing_reset_online_cpus(&tr->max_buffer);
#endif

	mutex_unlock(&trace_types_lock);

	return 0;
}

static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *fpos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	const char *clockstr;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	clockstr = strstrip(buf);

	ret = tracing_set_clock(tr, clockstr);
	if (ret)
		return ret;

	*fpos += cnt;

	return cnt;
}
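
/*
 * Usage sketch for the trace_clock file (reads go through
 * tracing_clock_show(), writes through tracing_clock_write()):
 *
 *	# cat /sys/kernel/debug/tracing/trace_clock
 *	[local] global counter uptime perf
 *	# echo global > /sys/kernel/debug/tracing/trace_clock
 *
 * The bracketed name is the clock currently in use; the exact list
 * depends on the trace_clocks[] table built for this kernel.
 */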

static int tracing_clock_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr))
		return -ENODEV;

	ret = single_open(file, tracing_clock_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

struct ftrace_buffer_info {
	struct trace_iterator	iter;
	void			*spare;
	unsigned int		read;
};

#ifdef CONFIG_TRACER_SNAPSHOT
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		ret = -ENOMEM;
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
			goto out;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
			goto out;
		}
		ret = 0;

		iter->tr = tr;
		iter->trace_buffer = &tr->max_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		m->private = iter;
		file->private_data = m;
	}
out:
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	switch (val) {
	case 0:
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (!tr->allocated_snapshot) {
			ret = alloc_snapshot(tr);
			if (ret < 0)
				break;
		}
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id());
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}
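
/*
 * The switch above gives the snapshot file its documented semantics
 * (a usage sketch, assuming CONFIG_TRACER_SNAPSHOT=y):
 *
 *	# echo 1 > snapshot	(allocate the spare buffer if needed and
 *				 swap it with the live buffer)
 *	# echo 0 > snapshot	(free the snapshot buffer)
 *	# echo 2 > snapshot	(clear the snapshot contents without
 *				 freeing it - the "default" case)
 */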

static int tracing_snapshot_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	int ret;

	ret = tracing_release(inode, file);

	if (file->f_mode & FMODE_READ)
		return ret;

	/* If write only, the seq_file is just a stub */
	if (m)
		kfree(m->private);
	kfree(m);

	return 0;
}

static int tracing_buffers_open(struct inode *inode, struct file *filp);
static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
				    size_t count, loff_t *ppos);
static int tracing_buffers_release(struct inode *inode, struct file *file);
static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
		struct pipe_inode_info *pipe, size_t len, unsigned int flags);

static int snapshot_raw_open(struct inode *inode, struct file *filp)
{
	struct ftrace_buffer_info *info;
	int ret;

	ret = tracing_buffers_open(inode, filp);
	if (ret < 0)
		return ret;

	info = filp->private_data;

	if (info->iter.trace->use_max_tr) {
		tracing_buffers_release(inode, filp);
		return -EBUSY;
	}

	info->iter.snapshot = true;
	info->iter.trace_buffer = &info->iter.tr->max_buffer;

	return ret;
}

#endif /* CONFIG_TRACER_SNAPSHOT */


static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};

static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_free_buffer_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};

static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_clock_write,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};

static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */

static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct ftrace_buffer_info *info;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	mutex_lock(&trace_types_lock);

	info->iter.tr		= tr;
	info->iter.cpu_file	= tracing_get_cpu(inode);
	info->iter.trace	= tr->current_trace;
	info->iter.trace_buffer = &tr->trace_buffer;
	info->spare		= NULL;
	/* Force reading ring buffer for first read */
	info->read		= (unsigned int)-1;

	filp->private_data = info;

	mutex_unlock(&trace_types_lock);

	ret = nonseekable_open(inode, filp);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static unsigned int
tracing_buffers_poll(struct file *filp, poll_table *poll_table)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;

	return trace_poll(iter, filp, poll_table);
}

static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;
	ssize_t ret;
	ssize_t size;

	if (!count)
		return 0;

	mutex_lock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
		size = -EBUSY;
		goto out_unlock;
	}
#endif

	if (!info->spare)
		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
							  iter->cpu_file);
	size = -ENOMEM;
	if (!info->spare)
		goto out_unlock;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

 again:
	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
				    &info->spare,
				    count,
				    iter->cpu_file, 0);
	trace_access_unlock(iter->cpu_file);

	if (ret < 0) {
		if (trace_empty(iter)) {
			if ((filp->f_flags & O_NONBLOCK)) {
				size = -EAGAIN;
				goto out_unlock;
			}
			mutex_unlock(&trace_types_lock);
			wait_on_pipe(iter);
			mutex_lock(&trace_types_lock);
			if (signal_pending(current)) {
				size = -EINTR;
				goto out_unlock;
			}
			goto again;
		}
		size = 0;
		goto out_unlock;
	}

	info->read = 0;
 read:
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size) {
		size = -EFAULT;
		goto out_unlock;
	}
	size -= ret;

	*ppos += size;
	info->read += size;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return size;
}
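
/*
 * Reader's note: this is the handler behind per_cpu/cpuN/trace_pipe_raw.
 * It hands out raw ring-buffer pages, so userspace should read in
 * page-sized chunks and parse the binary page format (as trace-cmd
 * does); it is not the text format of the "trace" file.
 */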

static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	__trace_array_put(iter->tr);

	if (info->spare)
		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
	kfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}

struct buffer_ref {
	struct ring_buffer	*buffer;
	void			*page;
	int			ref;
};

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	buf->private = 0;
}

static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ref->ref++;
}

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge		= 0,
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,
};

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we errored out in filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	spd->partial[i].private = 0;
}

static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, size, i;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}
#endif

	if (splice_grow_spd(pipe, &spd)) {
		ret = -ENOMEM;
		goto out;
	}

	if (*ppos & (PAGE_SIZE - 1)) {
		ret = -EINVAL;
		goto out;
	}

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE) {
			ret = -EINVAL;
			goto out;
		}
		len &= PAGE_MASK;
	}

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref)
			break;

		ref->ref = 1;
		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (!ref->page) {
			kfree(ref);
			break;
		}

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->page);
			kfree(ref);
			break;
		}

		/*
		 * Zero out any leftover data; this page is going to
		 * user land.
		 */
		size = ring_buffer_page_len(ref->page);
		if (size < PAGE_SIZE)
			memset(ref->page + size, 0, PAGE_SIZE - size);

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
			ret = -EAGAIN;
			goto out;
		}
		mutex_unlock(&trace_types_lock);
		wait_on_pipe(iter);
		mutex_lock(&trace_types_lock);
		if (signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
	splice_shrink_spd(&spd);
out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
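
/*
 * A minimal userspace consumer of this splice path might look like the
 * sketch below (illustrative only; #includes and error handling elided,
 * and the path assumes debugfs is mounted at /sys/kernel/debug):
 *
 *	int tfd = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw",
 *		       O_RDONLY);
 *	int ofd = open("cpu0.raw", O_CREAT | O_WRONLY, 0644);
 *	int pfd[2];
 *
 *	pipe(pfd);
 *	for (;;) {
 *		ssize_t n = splice(tfd, NULL, pfd[1], NULL, 4096, 0);
 *		if (n <= 0)
 *			break;
 *		splice(pfd[0], NULL, ofd, NULL, n, 0);
 *	}
 *
 * Since splice() requires a pipe on one side, the data is bounced
 * through pfd[] on its way to the output file, ring-buffer page by page.
 */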

static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
								t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				ring_buffer_time_stamp(trace_buf->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);

	kfree(s);

	return count;
}
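
/*
 * Example of what per_cpu/cpuN/stats produces (field names are taken
 * from the printfs above; the numbers are made up):
 *
 *	entries: 129
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 6724
 *	oldest event ts:  2296.297400
 *	now ts:  2349.431167
 *	dropped events: 0
 *	read events: 129
 */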

static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

#ifdef CONFIG_DYNAMIC_FTRACE

int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
	int r;

	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
	buf[r++] = '\n';

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);

	return r;
}

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
#endif /* CONFIG_DYNAMIC_FTRACE */
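
/*
 * Note: tracing_read_dyn_info() backs the dyn_ftrace_total_info file;
 * the pointer handed in as private data is printed as a count, and is
 * expected to be the number of call sites dynamic ftrace has converted
 * (wired up where the file is created, later in this file).
 */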

#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	tracing_snapshot();
}

static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	unsigned long *count = (long *)data;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_snapshot();
}

static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	seq_printf(m, "snapshot");

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,
};

static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,
};

static int
ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	if (ret >= 0)
		alloc_snapshot(&global_trace);

	return ret < 0 ? ret : 0;
}

static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name			= "snapshot",
	.func			= ftrace_trace_snapshot_callback,
};

static __init int register_snapshot_cmd(void)
{
	return register_ftrace_command(&ftrace_snapshot_cmd);
}
#else
static inline __init int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
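
/*
 * With both config options set, this wires the "snapshot" command into
 * set_ftrace_filter. A usage sketch:
 *
 *	# echo 'schedule:snapshot' > set_ftrace_filter
 *	# echo 'schedule:snapshot:3' > set_ftrace_filter
 *
 * The first snapshots on every call to schedule(); the second only for
 * the next three calls, via the countdown in ftrace_count_snapshot().
 * Prefixing the line with '!' removes the probe again.
 */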

struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
{
	if (tr->dir)
		return tr->dir;

	if (!debugfs_initialized())
		return NULL;

	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		tr->dir = debugfs_create_dir("tracing", NULL);

	if (!tr->dir)
		pr_warn_once("Could not create debugfs directory 'tracing'\n");

	return tr->dir;
}

struct dentry *tracing_init_dentry(void)
{
	return tracing_init_dentry_tr(&global_trace);
}

static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
	struct dentry *d_tracer;

	if (tr->percpu_dir)
		return tr->percpu_dir;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);

	WARN_ONCE(!tr->percpu_dir,
		  "Could not create debugfs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;
}

static struct dentry *
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
		      void *data, long cpu, const struct file_operations *fops)
{
	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);

	if (ret) /* See tracing_get_cpu() */
		ret->d_inode->i_cdev = (void *)(cpu + 1);
	return ret;
}

static void
tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
				tr, cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_cpu_file("trace", 0644, d_cpu,
				tr, cpu, &tracing_fops);

	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
				tr, cpu, &tracing_buffers_fops);

	trace_create_cpu_file("stats", 0444, d_cpu,
				tr, cpu, &tracing_stats_fops);

	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
				tr, cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_cpu_file("snapshot", 0644, d_cpu,
				tr, cpu, &snapshot_fops);

	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
				tr, cpu, &snapshot_raw_fops);
#endif
}
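
/*
 * The calls above give each CPU its own directory, so the per-CPU view
 * of the buffer ends up looking like (a sketch):
 *
 *	per_cpu/cpu0/{trace,trace_pipe,trace_pipe_raw,stats,buffer_size_kb}
 *	per_cpu/cpu1/...
 *
 * plus snapshot and snapshot_raw when CONFIG_TRACER_SNAPSHOT is enabled.
 */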

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};

static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}


static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
	.llseek	= generic_file_llseek,
};

static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	long index = (long)filp->private_data;
	char *buf;

	if (trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	struct trace_array *tr = &global_trace;
	long index = (long)filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek = generic_file_llseek,
};

struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = debugfs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warning("Could not create debugfs '%s' entry\n", name);

	return ret;
}


static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->options = debugfs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warning("Could not create debugfs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}

static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);

}

static struct trace_option_dentry *
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;

	if (!tracer)
		return NULL;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return NULL;

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return NULL;

	for (cnt = 0; opts[cnt].name; cnt++)
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);

	return topts;
}

static void
destroy_trace_option_files(struct trace_option_dentry *topts)
{
	int cnt;

	if (!topts)
		return;

	for (cnt = 0; topts[cnt].opt; cnt++) {
		if (topts[cnt].entry)
			debugfs_remove(topts[cnt].entry);
	}

	kfree(topts);
}

static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options, (void *)index,
				 &trace_options_core_fops);
}

static __init void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++)
		create_trace_option_core_file(tr, trace_options[i], i);
}
6122
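/*
 * Handlers for the per-instance "tracing_on" file. Reading returns 0
 * or 1; writing enables or disables recording into the ring buffer and
 * invokes the current tracer's start()/stop() callbacks.
 *
 * Example from userspace (assuming debugfs is mounted at the usual
 * /sys/kernel/debug):
 *
 *	echo 0 > /sys/kernel/debug/tracing/tracing_on	# pause recording
 *	cat /sys/kernel/debug/tracing/tracing_on	# prints 0
 *	echo 1 > /sys/kernel/debug/tracing/tracing_on	# resume recording
 */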
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic_tr,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};

struct dentry *trace_instance_dir;

static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);

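/*
 * Allocate one trace_buffer (the ring buffer itself plus the per-CPU
 * bookkeeping) for @tr. Used for the main buffer and, under
 * CONFIG_TRACER_MAX_TRACE, for the max/snapshot buffer as well.
 */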
static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}

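/*
 * Allocate all buffers for a trace array. The max/snapshot buffer is
 * only allocated at full size when a snapshot was requested on the
 * kernel command line; otherwise it starts out at a single page.
 */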
static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		ring_buffer_free(tr->trace_buffer.buffer);
		free_percpu(tr->trace_buffer.data);
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}

static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	if (tr->trace_buffer.buffer) {
		ring_buffer_free(tr->trace_buffer.buffer);
		tr->trace_buffer.buffer = NULL;
		free_percpu(tr->trace_buffer.data);
	}

#ifdef CONFIG_TRACER_MAX_TRACE
	if (tr->max_buffer.buffer) {
		ring_buffer_free(tr->max_buffer.buffer);
		tr->max_buffer.buffer = NULL;
	}
#endif
}

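/*
 * Create a new trace instance: a complete trace_array with its own
 * buffers, events and debugfs tree, linked onto ftrace_trace_arrays.
 * Reached from instance_mkdir(), i.e. in response to something like
 * (path assumes the usual debugfs mount point):
 *
 *	mkdir /sys/kernel/debug/tracing/instances/foo
 */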
static int new_instance_create(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = debugfs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		debugfs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	init_tracer_debugfs(tr, tr->dir);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

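/*
 * Tear down an instance created above. Returns -ENODEV if no instance
 * of that name exists, and -EBUSY while the instance is still
 * referenced (e.g. by an open file within it).
 */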
static int instance_delete(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	ret = -EBUSY;
	if (tr->ref)
		goto out_unlock;

	list_del(&tr->list);

	tracing_set_nop(tr);
	event_trace_del_tracer(tr);
	ftrace_destroy_function_files(tr);
	debugfs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static int instance_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/*
	 * The inode mutex is locked, but debugfs_create_dir() will also
	 * take the mutex. As the instances directory can not be destroyed
	 * or changed in any other way, it is safe to unlock it, and
	 * let the dentry try. If two users try to make the same dir at
	 * the same time, then new_instance_create() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = new_instance_create(dentry->d_iname);

	mutex_lock(&inode->i_mutex);

	return ret;
}

static int instance_rmdir(struct inode *inode, struct dentry *dentry)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/* The caller did a dget() on dentry */
	mutex_unlock(&dentry->d_inode->i_mutex);

	/*
	 * The inode mutex is locked, but debugfs_remove_recursive() will
	 * also take the mutex. As the instances directory can not be
	 * destroyed or changed in any other way, it is safe to unlock it,
	 * and let the dentry try. If two users try to delete the same dir
	 * at the same time, then instance_delete() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = instance_delete(dentry->d_iname);

	mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
	mutex_lock(&dentry->d_inode->i_mutex);

	return ret;
}

static const struct inode_operations instance_dir_inode_operations = {
	.lookup		= simple_lookup,
	.mkdir		= instance_mkdir,
	.rmdir		= instance_rmdir,
};

static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = debugfs_create_dir("instances", d_tracer);
	if (WARN_ON(!trace_instance_dir))
		return;

	/* Hijack the dir inode operations, to allow mkdir */
	trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
}

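/*
 * Populate the debugfs directory of one trace array with the standard
 * control files (trace, trace_pipe, tracing_on, buffer_size_kb, ...).
 * This runs for the top-level tracing directory and for every instance.
 */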
static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			  &tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_debugfs_percpu(tr, cpu);
}

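/*
 * Top-level debugfs setup: wire up the global trace array and add the
 * files that exist only once (tracing_thresh, README, saved_cmdlines,
 * the instances and options directories).
 */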
static __init int tracer_init_debugfs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	init_tracer_debugfs(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			  &tracing_thresh, &tracing_max_lat_fops);

	trace_create_file("README", 0444, d_tracer,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			  NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			  &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	create_trace_options_dir(&global_trace);

	return 0;
}

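/*
 * Panic and die notifiers: if the user asked for it (the
 * ftrace_dump_on_oops boot option or the kernel.ftrace_dump_on_oops
 * sysctl), dump the ring buffer to the console before the system dies.
 */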
static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call	= trace_panic_handler,
	.next		= NULL,
	.priority	= 150	/* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};

/*
 * printk is set to a max of 1024; we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define KERN_TRACE here so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->len >= TRACE_MAX_PRINT)
		s->len = TRACE_MAX_PRINT;

	/* Should be NUL-terminated already, but be paranoid. */
	s->buffer[s->len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

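/*
 * Set up an iterator over the global trace buffer for callers that
 * cannot go through the normal file-open path, such as ftrace_dump()
 * below and the kdb "ftdump" command.
 */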
void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}

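/*
 * Dump the ring buffer(s) to the console at KERN_TRACE level.
 * @oops_dump_mode selects what gets printed: DUMP_ALL dumps every CPU,
 * DUMP_ORIG only the CPU that triggered the dump, and DUMP_NONE
 * nothing at all.
 */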
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
	}

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all that we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);

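/*
 * Early-boot allocation of the global trace buffers. This runs as an
 * early_initcall(), long before debugfs exists, so only the buffers,
 * locks and notifiers are set up here; the control files are added
 * later by tracer_init_debugfs().
 */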
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* Used for event triggers */
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_free_cpumask;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warning("Trace clock %s not defined, going back to default\n",
				   trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	while (trace_boot_options) {
		char *option;

		option = strsep(&trace_boot_options, ",");
		trace_set_options(&global_trace, option);
	}

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The default bootup tracer's name lives in an init section.
	 * This function is called at late_initcall time. If we did not
	 * find the boot tracer by then, clear the pointer out, to prevent
	 * a later registration from accessing the init memory that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

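/*
 * Boot ordering: allocate the buffers first (early_initcall), create
 * the debugfs files once the VFS is up (fs_initcall), and finally drop
 * the reference to the init-section bootup tracer name (late_initcall).
 */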
early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);