/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring-buffer to count the
 * entries inserted during the selftest, although some concurrent
 * insertions into the ring-buffer, such as trace_printk, could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops.
 * Set it to 1 if you want to dump the buffers of all CPUs.
 * Set it to 2 if you want to dump only the buffer of the CPU that
 * triggered the oops.
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);
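/*
 * Boot-time usage sketch: a tracer can be selected on the kernel
 * command line (assuming the named tracer, e.g. the function tracer,
 * is built in):
 *
 *	ftrace=function
 *
 * This both picks the boot-up tracer and, as above, expands the ring
 * buffer early.
 */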

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
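/*
 * Usage sketch (kernel command line):
 *
 *	ftrace_dump_on_oops		dump the buffers of all CPUs
 *	ftrace_dump_on_oops=orig_cpu	dump only the buffer of the CPU
 *					that triggered the oops
 */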

static int __init stop_trace_on_warning(char *str)
{
	__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning=", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);
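/*
 * Usage sketch (kernel command line): booting with
 *
 *	alloc_snapshot
 *
 * allocates the snapshot buffer up front, so tracing_snapshot() can
 * be used without a separate allocation step.
 */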

static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
	return 0;
}
__setup("trace_options=", set_trace_boot_options);
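/*
 * Usage sketch (kernel command line): the saved string is applied
 * later as a comma-separated option list, e.g.:
 *
 *	trace_options=stacktrace,noirq-info
 *
 * Option names come from the trace_options[] table further down in
 * this file; a "no" prefix clears an option.
 */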

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);
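/*
 * Usage sketch (kernel command line): select the trace clock at boot
 * using one of the names from the trace_clocks[] table below, e.g.:
 *
 *	trace_clock=global
 */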
194
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400195
Lai Jiangshancf8e3472009-03-30 13:48:00 +0800196unsigned long long ns2usecs(cycle_t nsec)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200197{
198 nsec += 500;
199 do_div(nsec, 1000);
200 return nsec;
201}
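/*
 * The +500 above makes ns2usecs() round to the nearest microsecond:
 * e.g. ns2usecs(1499) == 1 and ns2usecs(1500) == 2.
 */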

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array	global_trace;

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}
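/*
 * Usage sketch (hypothetical caller): pin a trace_array while working
 * with it, then drop the reference:
 *
 *	if (trace_array_get(tr) < 0)
 *		return -ENODEV;
 *	...
 *	trace_array_put(tr);
 */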

int filter_check_discard(struct ftrace_event_file *file, void *rec,
			 struct ring_buffer *buffer,
			 struct ring_buffer_event *event)
{
	if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(file->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(filter_check_discard);

int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(call_filter_check_discard);

static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek() etc.)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow another process to
 * consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in ring buffer, and this page will be rewritten
 *      by the event producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to system.
 *
 * These primitives allow multi-process access to different cpu ring buffers
 * concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
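/*
 * Usage sketch (hypothetical reader): serialize consumption of one
 * cpu's buffer against whole-range readers:
 *
 *	trace_access_lock(cpu);
 *	... consume events from the @cpu ring buffer ...
 *	trace_access_unlock(cpu);
 *
 * Passing RING_BUFFER_ALL_CPUS instead takes the access lock
 * exclusively across all cpus.
 */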

/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;

static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, preempt_count());
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);
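/*
 * Usage sketch: callers normally go through the trace_puts() macro,
 * which supplies _THIS_IP_ and the compile-time size of the string:
 *
 *	trace_puts("reached the slow path\n");
 */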

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, preempt_count());
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
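/*
 * Usage sketch (hypothetical caller): capture the trace leading up to
 * a rare condition without stopping the trace, assuming the snapshot
 * buffer was allocated beforehand (e.g. with tracing_snapshot_alloc()):
 *
 *	if (rare_error_condition)
 *		tracing_snapshot();
 */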

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);
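/*
 * Usage sketch (hypothetical debugging aid): freeze the ring buffers
 * the moment a problem is detected, so the interesting trace is not
 * overwritten:
 *
 *	if (data_looks_corrupted(obj))
 *		tracing_off();
 */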

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr: the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);
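/*
 * Usage sketch (kernel command line): memparse() accepts K, M and G
 * suffixes, so a 4 MB per-cpu buffer can be requested with:
 *
 *	trace_buf_size=4M
 */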

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);
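/*
 * Usage sketch (kernel command line): the value is given in
 * microseconds and stored in nanoseconds (hence the "* 1000" above):
 *
 *	tracing_thresh=100
 */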

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	"record-cmd",
	"overwrite",
	"disable_on_free",
	"irq-info",
	"markers",
	"function-trace",
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,	"local",	1 },
	{ trace_clock_global,	"global",	1 },
	{ trace_clock_counter,	"counter",	0 },
	{ trace_clock_jiffies,	"uptime",	1 },
	{ trace_clock,		"perf",		1 },
	ARCH_TRACE_CLOCKS
};
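/*
 * Usage sketch: the clock can also be switched at run time by writing
 * one of the names above into the tracing directory, e.g.:
 *
 *	echo global > /sys/kernel/debug/tracing/trace_clock
 */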

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
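/*
 * Usage sketch (hypothetical write handler): pull one token per call
 * and act on it once it is complete:
 *
 *	ret = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (ret >= 0 && trace_parser_loaded(&parser))
 *		... act on parser.buffer ...
 */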

ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
	int len;
	int ret;

	if (!cnt)
		return 0;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
	if (ret == cnt)
		return -EFAULT;

	cnt -= ret;

	s->readpos += cnt;
	return cnt;
}

static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->readpos, cnt);

	s->readpos += cnt;
	return cnt;
}

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int wait_on_pipe(struct trace_iterator *iter)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break them. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}
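/*
 * Usage sketch (hypothetical minimal tracer, built into the kernel):
 *
 *	static struct tracer demo_tracer __read_mostly = {
 *		.name	= "demo",
 *	};
 *
 *	static int __init init_demo_tracer(void)
 *	{
 *		return register_tracer(&demo_tracer);
 *	}
 *	core_initcall(init_demo_tracer);
 */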

void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}

#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
struct saved_cmdlines_buffer {
	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
	unsigned *map_cmdline_to_pid;
	unsigned cmdline_num;
	int cmdline_idx;
	char *saved_cmdlines;
};
static struct saved_cmdlines_buffer *savedcmd;

/* temporarily disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;

Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001303static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001304{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001305 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1306}
1307
1308static inline void set_cmdline(int idx, const char *cmdline)
1309{
1310 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1311}
1312
1313static int allocate_cmdlines_buffer(unsigned int val,
1314 struct saved_cmdlines_buffer *s)
1315{
1316 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1317 GFP_KERNEL);
1318 if (!s->map_cmdline_to_pid)
1319 return -ENOMEM;
1320
1321 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1322 if (!s->saved_cmdlines) {
1323 kfree(s->map_cmdline_to_pid);
1324 return -ENOMEM;
1325 }
1326
1327 s->cmdline_idx = 0;
1328 s->cmdline_num = val;
1329 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1330 sizeof(s->map_pid_to_cmdline));
1331 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1332 val * sizeof(*s->map_cmdline_to_pid));
1333
1334 return 0;
1335}
1336
1337static int trace_create_savedcmd(void)
1338{
1339 int ret;
1340
Namhyung Kima6af8fb2014-06-10 16:11:35 +09001341 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001342 if (!savedcmd)
1343 return -ENOMEM;
1344
1345 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1346 if (ret < 0) {
1347 kfree(savedcmd);
1348 savedcmd = NULL;
1349 return -ENOMEM;
1350 }
1351
1352 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001353}
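/*
 * Illustrative sketch (assumed boot-time caller, hypothetical message):
 * the init path only needs to check the return value, e.g.
 *
 *	if (trace_create_savedcmd() < 0)
 *		pr_warning("Failed to allocate saved cmdlines buffer\n");
 *
 * At SAVED_CMDLINES_DEFAULT (128) entries this costs roughly
 * 128 * TASK_COMM_LEN bytes of strings plus 128 unsigneds for the
 * cmdline->pid map, on top of the fixed pid->cmdline array.
 */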
1354
Carsten Emdeb5130b12009-09-13 01:43:07 +02001355int is_tracing_stopped(void)
1356{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001357 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02001358}
1359
Steven Rostedt0f048702008-11-05 16:05:44 -05001360/**
1361 * tracing_start - quick start of the tracer
1362 *
1363 * If tracing is enabled but was stopped by tracing_stop,
1364 * this will start the tracer back up.
1365 */
1366void tracing_start(void)
1367{
1368 struct ring_buffer *buffer;
1369 unsigned long flags;
1370
1371 if (tracing_disabled)
1372 return;
1373
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001374 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1375 if (--global_trace.stop_count) {
1376 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05001377 /* Someone screwed up their debugging */
1378 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001379 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05001380 }
Steven Rostedt0f048702008-11-05 16:05:44 -05001381 goto out;
1382 }
1383
Steven Rostedta2f80712010-03-12 19:56:00 -05001384 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001385 arch_spin_lock(&global_trace.max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05001386
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001387 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001388 if (buffer)
1389 ring_buffer_record_enable(buffer);
1390
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001391#ifdef CONFIG_TRACER_MAX_TRACE
1392 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001393 if (buffer)
1394 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001395#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001396
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001397 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001398
Steven Rostedt0f048702008-11-05 16:05:44 -05001399 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001400 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1401}
1402
1403static void tracing_start_tr(struct trace_array *tr)
1404{
1405 struct ring_buffer *buffer;
1406 unsigned long flags;
1407
1408 if (tracing_disabled)
1409 return;
1410
1411 /* If global, we need to also start the max tracer */
1412 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1413 return tracing_start();
1414
1415 raw_spin_lock_irqsave(&tr->start_lock, flags);
1416
1417 if (--tr->stop_count) {
1418 if (tr->stop_count < 0) {
1419 /* Someone screwed up their debugging */
1420 WARN_ON_ONCE(1);
1421 tr->stop_count = 0;
1422 }
1423 goto out;
1424 }
1425
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001426 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001427 if (buffer)
1428 ring_buffer_record_enable(buffer);
1429
1430 out:
1431 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001432}
1433
1434/**
1435 * tracing_stop - quick stop of the tracer
1436 *
1437 * Lightweight way to stop tracing. Use in conjunction with
1438 * tracing_start.
1439 */
1440void tracing_stop(void)
1441{
1442 struct ring_buffer *buffer;
1443 unsigned long flags;
1444
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001445 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1446 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05001447 goto out;
1448
Steven Rostedta2f80712010-03-12 19:56:00 -05001449 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001450 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001451
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001452 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001453 if (buffer)
1454 ring_buffer_record_disable(buffer);
1455
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001456#ifdef CONFIG_TRACER_MAX_TRACE
1457 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001458 if (buffer)
1459 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001460#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001461
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001462 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001463
Steven Rostedt0f048702008-11-05 16:05:44 -05001464 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001465 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1466}
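/*
 * Illustrative sketch: tracing_stop()/tracing_start() nest by way of
 * stop_count, so a paired use is safe even if something else already
 * stopped tracing (hypothetical helper name):
 *
 *	tracing_stop();
 *	run_noisy_diagnostic();
 *	tracing_start();	- recording resumes only at the outermost start
 */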
1467
1468static void tracing_stop_tr(struct trace_array *tr)
1469{
1470 struct ring_buffer *buffer;
1471 unsigned long flags;
1472
1473 /* If global, we need to also stop the max tracer */
1474 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1475 return tracing_stop();
1476
1477 raw_spin_lock_irqsave(&tr->start_lock, flags);
1478 if (tr->stop_count++)
1479 goto out;
1480
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001481 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001482 if (buffer)
1483 ring_buffer_record_disable(buffer);
1484
1485 out:
1486 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001487}
1488
Ingo Molnare309b412008-05-12 21:20:51 +02001489void trace_stop_cmdline_recording(void);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001490
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001491static int trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001492{
Carsten Emdea635cf02009-03-18 09:00:41 +01001493 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001494
1495 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001496 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001497
1498 /*
1499 * It's not the end of the world if we don't get
1500 * the lock, but we also don't want to spin
1501 * nor do we want to disable interrupts,
1502 * so if we miss here, then better luck next time.
1503 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001504 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001505 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001506
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001507 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001508 if (idx == NO_CMDLINE_MAP) {
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001509 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001510
Carsten Emdea635cf02009-03-18 09:00:41 +01001511 /*
1512 * Check whether the cmdline buffer at idx has a pid
1513 * mapped. We are going to overwrite that entry so we
1514 * need to clear the map_pid_to_cmdline. Otherwise we
1515 * would read the new comm for the old pid.
1516 */
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001517 pid = savedcmd->map_cmdline_to_pid[idx];
Carsten Emdea635cf02009-03-18 09:00:41 +01001518 if (pid != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001519 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001520
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001521 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1522 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001523
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001524 savedcmd->cmdline_idx = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001525 }
1526
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001527 set_cmdline(idx, tsk->comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001528
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001529 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001530
1531 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001532}
1533
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001534static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001535{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001536 unsigned map;
1537
Steven Rostedt4ca53082009-03-16 19:20:15 -04001538 if (!pid) {
1539 strcpy(comm, "<idle>");
1540 return;
1541 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001542
Steven Rostedt74bf4072010-01-25 15:11:53 -05001543 if (WARN_ON_ONCE(pid < 0)) {
1544 strcpy(comm, "<XXX>");
1545 return;
1546 }
1547
Steven Rostedt4ca53082009-03-16 19:20:15 -04001548 if (pid > PID_MAX_DEFAULT) {
1549 strcpy(comm, "<...>");
1550 return;
1551 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001552
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001553 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001554 if (map != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001555 strcpy(comm, get_saved_cmdlines(map));
Thomas Gleixner50d88752009-03-18 08:58:44 +01001556 else
1557 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001558}
1559
1560void trace_find_cmdline(int pid, char comm[])
1561{
1562 preempt_disable();
1563 arch_spin_lock(&trace_cmdline_lock);
1564
1565 __trace_find_cmdline(pid, comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001566
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001567 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001568 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001569}
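/*
 * Illustrative sketch: output code resolves a recorded pid back to a comm
 * with a TASK_COMM_LEN buffer, roughly as the trace output path does:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	trace_find_cmdline(entry->pid, comm);
 *	seq_printf(m, "%16s-%-5d ", comm, entry->pid);
 */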
1570
Ingo Molnare309b412008-05-12 21:20:51 +02001571void tracing_record_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001572{
Steven Rostedt0fb96562012-05-11 14:25:30 -04001573 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001574 return;
1575
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001576 if (!__this_cpu_read(trace_cmdline_save))
1577 return;
1578
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001579 if (trace_save_cmdline(tsk))
1580 __this_cpu_write(trace_cmdline_save, false);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001581}
1582
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03001583void
Steven Rostedt38697052008-10-01 13:14:09 -04001584tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1585 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001586{
1587 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001588
Steven Rostedt777e2082008-09-29 23:02:42 -04001589 entry->preempt_count = pc & 0xff;
1590 entry->pid = (tsk) ? tsk->pid : 0;
1591 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04001592#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04001593 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04001594#else
1595 TRACE_FLAG_IRQS_NOSUPPORT |
1596#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001597 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1598 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02001599 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1600 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001601}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02001602EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
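/*
 * Illustrative sketch: the bits packed into entry->flags above are decoded
 * again on output, roughly (latency-format style, single characters):
 *
 *	irqs_off = (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.';
 *	hardirq  = (entry->flags & TRACE_FLAG_HARDIRQ)  ? 'h' : '.';
 *	softirq  = (entry->flags & TRACE_FLAG_SOFTIRQ)  ? 's' : '.';
 */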
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001603
Steven Rostedte77405a2009-09-02 14:17:06 -04001604struct ring_buffer_event *
1605trace_buffer_lock_reserve(struct ring_buffer *buffer,
1606 int type,
1607 unsigned long len,
1608 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001609{
1610 struct ring_buffer_event *event;
1611
Steven Rostedte77405a2009-09-02 14:17:06 -04001612 event = ring_buffer_lock_reserve(buffer, len);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001613 if (event != NULL) {
1614 struct trace_entry *ent = ring_buffer_event_data(event);
1615
1616 tracing_generic_entry_update(ent, flags, pc);
1617 ent->type = type;
1618 }
1619
1620 return event;
1621}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001622
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001623void
1624__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1625{
1626 __this_cpu_write(trace_cmdline_save, true);
1627 ring_buffer_unlock_commit(buffer, event);
1628}
1629
Steven Rostedte77405a2009-09-02 14:17:06 -04001630static inline void
1631__trace_buffer_unlock_commit(struct ring_buffer *buffer,
1632 struct ring_buffer_event *event,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001633 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001634{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001635 __buffer_unlock_commit(buffer, event);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001636
Steven Rostedte77405a2009-09-02 14:17:06 -04001637 ftrace_trace_stack(buffer, flags, 6, pc);
1638 ftrace_trace_userstack(buffer, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001639}
1640
Steven Rostedte77405a2009-09-02 14:17:06 -04001641void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1642 struct ring_buffer_event *event,
1643 unsigned long flags, int pc)
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001644{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001645 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001646}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001647EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
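/*
 * Illustrative sketch of the reserve/fill/commit pattern these helpers
 * implement (trace_function() below is a real instance of it):
 *
 *	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
 *					  flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->ip = ip;
 *	trace_buffer_unlock_commit(buffer, event, flags, pc);
 */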
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001648
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001649static struct ring_buffer *temp_buffer;
1650
Steven Rostedtef5580d2009-02-27 19:38:04 -05001651struct ring_buffer_event *
Steven Rostedtccb469a2012-08-02 10:32:10 -04001652trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1653 struct ftrace_event_file *ftrace_file,
1654 int type, unsigned long len,
1655 unsigned long flags, int pc)
1656{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001657 struct ring_buffer_event *entry;
1658
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001659 *current_rb = ftrace_file->tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001660 entry = trace_buffer_lock_reserve(*current_rb,
Steven Rostedtccb469a2012-08-02 10:32:10 -04001661 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001662 /*
1663 * If tracing is off, but we have triggers enabled
1664 * we still need to look at the event data. Use the temp_buffer
1665 * to store the trace event for the trigger to use. It's recursion
1666 * safe and will not be recorded anywhere.
1667 */
1668 if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
1669 *current_rb = temp_buffer;
1670 entry = trace_buffer_lock_reserve(*current_rb,
1671 type, len, flags, pc);
1672 }
1673 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04001674}
1675EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1676
1677struct ring_buffer_event *
Steven Rostedte77405a2009-09-02 14:17:06 -04001678trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1679 int type, unsigned long len,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001680 unsigned long flags, int pc)
1681{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001682 *current_rb = global_trace.trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04001683 return trace_buffer_lock_reserve(*current_rb,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001684 type, len, flags, pc);
1685}
Steven Rostedt94487d62009-05-05 19:22:53 -04001686EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001687
Steven Rostedte77405a2009-09-02 14:17:06 -04001688void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1689 struct ring_buffer_event *event,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001690 unsigned long flags, int pc)
1691{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001692 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001693}
Steven Rostedt94487d62009-05-05 19:22:53 -04001694EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001695
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001696void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1697 struct ring_buffer_event *event,
1698 unsigned long flags, int pc,
1699 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001700{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001701 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001702
1703 ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1704 ftrace_trace_userstack(buffer, flags, pc);
1705}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001706EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001707
Steven Rostedte77405a2009-09-02 14:17:06 -04001708void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1709 struct ring_buffer_event *event)
Steven Rostedt77d9f462009-04-02 01:16:59 -04001710{
Steven Rostedte77405a2009-09-02 14:17:06 -04001711 ring_buffer_discard_commit(buffer, event);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001712}
Steven Rostedt12acd472009-04-17 16:01:56 -04001713EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001714
Ingo Molnare309b412008-05-12 21:20:51 +02001715void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001716trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04001717 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1718 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001719{
Tom Zanussie1112b42009-03-31 00:48:49 -05001720 struct ftrace_event_call *call = &event_function;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001721 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001722 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001723 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001724
Steven Rostedtd7690412008-10-01 00:29:53 -04001725 /* If we are reading the ring buffer, don't trace */
Rusty Russelldd17c8f2009-10-29 22:34:15 +09001726 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
Steven Rostedtd7690412008-10-01 00:29:53 -04001727 return;
1728
Steven Rostedte77405a2009-09-02 14:17:06 -04001729 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001730 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001731 if (!event)
1732 return;
1733 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04001734 entry->ip = ip;
1735 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05001736
Tom Zanussif306cc82013-10-24 08:34:17 -05001737 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001738 __buffer_unlock_commit(buffer, event);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001739}
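/*
 * Illustrative sketch (hypothetical tracer callback): a function tracer
 * typically records one entry per traced call like
 *
 *	local_save_flags(flags);
 *	pc = preempt_count();
 *	trace_function(tr, ip, parent_ip, flags, pc);
 *
 * with ip/parent_ip supplied by the ftrace hook.
 */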
1740
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001741#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001742
1743#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1744struct ftrace_stack {
1745 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1746};
1747
1748static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1749static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1750
Steven Rostedte77405a2009-09-02 14:17:06 -04001751static void __ftrace_trace_stack(struct ring_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05001752 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001753 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02001754{
Tom Zanussie1112b42009-03-31 00:48:49 -05001755 struct ftrace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001756 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001757 struct stack_entry *entry;
Ingo Molnar86387f72008-05-12 21:20:51 +02001758 struct stack_trace trace;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001759 int use_stack;
1760 int size = FTRACE_STACK_ENTRIES;
Ingo Molnar86387f72008-05-12 21:20:51 +02001761
1762 trace.nr_entries = 0;
Ingo Molnar86387f72008-05-12 21:20:51 +02001763 trace.skip = skip;
Ingo Molnar86387f72008-05-12 21:20:51 +02001764
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001765 /*
1766 * Since events can happen in NMIs there's no safe way to
1767 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1768 * or NMI comes in, it will just have to use the default
1769 * FTRACE_STACK_ENTRIES.
1770 */
1771 preempt_disable_notrace();
1772
Shan Wei82146522012-11-19 13:21:01 +08001773 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001774 /*
1775 * We don't need any atomic variables, just a barrier.
1776 * If an interrupt comes in, we don't care, because it would
1777 * have exited and put the counter back to what we want.
1778 * We just need a barrier to keep gcc from moving things
1779 * around.
1780 */
1781 barrier();
1782 if (use_stack == 1) {
Christoph Lameterbdffd892014-04-29 14:17:40 -05001783 trace.entries = this_cpu_ptr(ftrace_stack.calls);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001784 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1785
1786 if (regs)
1787 save_stack_trace_regs(regs, &trace);
1788 else
1789 save_stack_trace(&trace);
1790
1791 if (trace.nr_entries > size)
1792 size = trace.nr_entries;
1793 } else
1794 /* From now on, use_stack is a boolean */
1795 use_stack = 0;
1796
1797 size *= sizeof(unsigned long);
1798
1799 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1800 sizeof(*entry) + size, flags, pc);
1801 if (!event)
1802 goto out;
1803 entry = ring_buffer_event_data(event);
1804
1805 memset(&entry->caller, 0, size);
1806
1807 if (use_stack)
1808 memcpy(&entry->caller, trace.entries,
1809 trace.nr_entries * sizeof(unsigned long));
1810 else {
1811 trace.max_entries = FTRACE_STACK_ENTRIES;
1812 trace.entries = entry->caller;
1813 if (regs)
1814 save_stack_trace_regs(regs, &trace);
1815 else
1816 save_stack_trace(&trace);
1817 }
1818
1819 entry->size = trace.nr_entries;
1820
Tom Zanussif306cc82013-10-24 08:34:17 -05001821 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001822 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001823
1824 out:
1825 /* Again, don't let gcc optimize things here */
1826 barrier();
Shan Wei82146522012-11-19 13:21:01 +08001827 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001828 preempt_enable_notrace();
1829
Ingo Molnarf0a920d2008-05-12 21:20:47 +02001830}
1831
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001832void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1833 int skip, int pc, struct pt_regs *regs)
1834{
1835 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1836 return;
1837
1838 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1839}
1840
Steven Rostedte77405a2009-09-02 14:17:06 -04001841void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1842 int skip, int pc)
Steven Rostedt53614992009-01-15 19:12:40 -05001843{
1844 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1845 return;
1846
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001847 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
Steven Rostedt53614992009-01-15 19:12:40 -05001848}
1849
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001850void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1851 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04001852{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001853 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
Steven Rostedt38697052008-10-01 13:14:09 -04001854}
1855
Steven Rostedt03889382009-12-11 09:48:22 -05001856/**
1857 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001858 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05001859 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001860void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05001861{
1862 unsigned long flags;
1863
1864 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05001865 return;
Steven Rostedt03889382009-12-11 09:48:22 -05001866
1867 local_save_flags(flags);
1868
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001869 /*
1870 * Skip 3 more; that seems to get us to the caller of
1871 * this function.
1872 */
1873 skip += 3;
1874 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1875 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05001876}
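/*
 * Illustrative usage: a call site debugging how it was reached can do
 *
 *	trace_dump_stack(0);
 *
 * and a wrapper that adds its own frame would pass 1 so only the frames
 * of interest land in the buffer.
 */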
1877
Steven Rostedt91e86e52010-11-10 12:56:12 +01001878static DEFINE_PER_CPU(int, user_stack_count);
1879
Steven Rostedte77405a2009-09-02 14:17:06 -04001880void
1881ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02001882{
Tom Zanussie1112b42009-03-31 00:48:49 -05001883 struct ftrace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02001884 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02001885 struct userstack_entry *entry;
1886 struct stack_trace trace;
Török Edwin02b67512008-11-22 13:28:47 +02001887
1888 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1889 return;
1890
Steven Rostedtb6345872010-03-12 20:03:30 -05001891 /*
1892 * NMIs can not handle page faults, even with fix ups.
1893 * Saving the user stack can (and often does) fault.
1894 */
1895 if (unlikely(in_nmi()))
1896 return;
1897
Steven Rostedt91e86e52010-11-10 12:56:12 +01001898 /*
1899 * prevent recursion, since the user stack tracing may
1900 * trigger other kernel events.
1901 */
1902 preempt_disable();
1903 if (__this_cpu_read(user_stack_count))
1904 goto out;
1905
1906 __this_cpu_inc(user_stack_count);
1907
Steven Rostedte77405a2009-09-02 14:17:06 -04001908 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001909 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02001910 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08001911 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02001912 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02001913
Steven Rostedt48659d32009-09-11 11:36:23 -04001914 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02001915 memset(&entry->caller, 0, sizeof(entry->caller));
1916
1917 trace.nr_entries = 0;
1918 trace.max_entries = FTRACE_STACK_ENTRIES;
1919 trace.skip = 0;
1920 trace.entries = entry->caller;
1921
1922 save_stack_trace_user(&trace);
Tom Zanussif306cc82013-10-24 08:34:17 -05001923 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001924 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001925
Li Zefan1dbd1952010-12-09 15:47:56 +08001926 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01001927 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001928 out:
1929 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02001930}
1931
Hannes Eder4fd27352009-02-10 19:44:12 +01001932#ifdef UNUSED
1933static void __trace_userstack(struct trace_array *tr, unsigned long flags)
Török Edwin02b67512008-11-22 13:28:47 +02001934{
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001935 ftrace_trace_userstack(tr, flags, preempt_count());
Török Edwin02b67512008-11-22 13:28:47 +02001936}
Hannes Eder4fd27352009-02-10 19:44:12 +01001937#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02001938
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001939#endif /* CONFIG_STACKTRACE */
1940
Steven Rostedt07d777f2011-09-22 14:01:55 -04001941/* created for use with alloc_percpu */
1942struct trace_buffer_struct {
1943 char buffer[TRACE_BUF_SIZE];
1944};
1945
1946static struct trace_buffer_struct *trace_percpu_buffer;
1947static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1948static struct trace_buffer_struct *trace_percpu_irq_buffer;
1949static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1950
1951/*
1952 * The buffer used is dependent on the context. There is a per cpu
1953 * buffer for normal context, softirq context, hard irq context and
1954 * for NMI context. This allows for lockless recording.
1955 *
1956 * Note, if the buffers failed to be allocated, then this returns NULL
1957 */
1958static char *get_trace_buf(void)
1959{
1960 struct trace_buffer_struct *percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04001961
1962 /*
1963 * If we have allocated per cpu buffers, then we do not
1964 * need to do any locking.
1965 */
1966 if (in_nmi())
1967 percpu_buffer = trace_percpu_nmi_buffer;
1968 else if (in_irq())
1969 percpu_buffer = trace_percpu_irq_buffer;
1970 else if (in_softirq())
1971 percpu_buffer = trace_percpu_sirq_buffer;
1972 else
1973 percpu_buffer = trace_percpu_buffer;
1974
1975 if (!percpu_buffer)
1976 return NULL;
1977
Shan Weid8a03492012-11-13 09:53:04 +08001978 return this_cpu_ptr(&percpu_buffer->buffer[0]);
Steven Rostedt07d777f2011-09-22 14:01:55 -04001979}
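/*
 * Illustrative sketch of the caller contract (trace_vbprintk() below is
 * the real thing): preemption must stay disabled while the returned
 * per-cpu buffer is in use:
 *
 *	preempt_disable_notrace();
 *	tbuffer = get_trace_buf();
 *	if (tbuffer)
 *		len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
 *	preempt_enable_notrace();
 */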
1980
1981static int alloc_percpu_trace_buffer(void)
1982{
1983 struct trace_buffer_struct *buffers;
1984 struct trace_buffer_struct *sirq_buffers;
1985 struct trace_buffer_struct *irq_buffers;
1986 struct trace_buffer_struct *nmi_buffers;
1987
1988 buffers = alloc_percpu(struct trace_buffer_struct);
1989 if (!buffers)
1990 goto err_warn;
1991
1992 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1993 if (!sirq_buffers)
1994 goto err_sirq;
1995
1996 irq_buffers = alloc_percpu(struct trace_buffer_struct);
1997 if (!irq_buffers)
1998 goto err_irq;
1999
2000 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
2001 if (!nmi_buffers)
2002 goto err_nmi;
2003
2004 trace_percpu_buffer = buffers;
2005 trace_percpu_sirq_buffer = sirq_buffers;
2006 trace_percpu_irq_buffer = irq_buffers;
2007 trace_percpu_nmi_buffer = nmi_buffers;
2008
2009 return 0;
2010
2011 err_nmi:
2012 free_percpu(irq_buffers);
2013 err_irq:
2014 free_percpu(sirq_buffers);
2015 err_sirq:
2016 free_percpu(buffers);
2017 err_warn:
2018 WARN(1, "Could not allocate percpu trace_printk buffer");
2019 return -ENOMEM;
2020}
2021
Steven Rostedt81698832012-10-11 10:15:05 -04002022static int buffers_allocated;
2023
Steven Rostedt07d777f2011-09-22 14:01:55 -04002024void trace_printk_init_buffers(void)
2025{
Steven Rostedt07d777f2011-09-22 14:01:55 -04002026 if (buffers_allocated)
2027 return;
2028
2029 if (alloc_percpu_trace_buffer())
2030 return;
2031
Steven Rostedt2184db42014-05-28 13:14:40 -04002032 /* trace_printk() is for debug use only. Don't use it in production. */
2033
2034 pr_warning("\n**********************************************************\n");
2035 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2036 pr_warning("** **\n");
2037 pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
2038 pr_warning("** **\n");
2039 pr_warning("** This means that this is a DEBUG kernel and it is **\n");
2040 pr_warning("** unsafe for production use. **\n");
2041 pr_warning("** **\n");
2042 pr_warning("** If you see this message and you are not debugging **\n");
2043 pr_warning("** the kernel, report this immediately to your vendor! **\n");
2044 pr_warning("** **\n");
2045 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2046 pr_warning("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04002047
Steven Rostedtb382ede62012-10-10 21:44:34 -04002048 /* Expand the buffers to the set size */
2049 tracing_update_buffers();
2050
Steven Rostedt07d777f2011-09-22 14:01:55 -04002051 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04002052
2053 /*
2054 * trace_printk_init_buffers() can be called by modules.
2055 * If that happens, then we need to start cmdline recording
2056 * directly here. If global_trace.trace_buffer.buffer is already
2057 * allocated here, then this was called by module code.
2058 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002059 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04002060 tracing_start_cmdline_record();
2061}
2062
2063void trace_printk_start_comm(void)
2064{
2065 /* Start tracing comms if trace printk is set */
2066 if (!buffers_allocated)
2067 return;
2068 tracing_start_cmdline_record();
2069}
2070
2071static void trace_printk_start_stop_comm(int enabled)
2072{
2073 if (!buffers_allocated)
2074 return;
2075
2076 if (enabled)
2077 tracing_start_cmdline_record();
2078 else
2079 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002080}
2081
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002082/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002083 * trace_vbprintk - write binary msg to tracing buffer
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002084 *
2085 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04002086int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002087{
Tom Zanussie1112b42009-03-31 00:48:49 -05002088 struct ftrace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002089 struct ring_buffer_event *event;
Steven Rostedte77405a2009-09-02 14:17:06 -04002090 struct ring_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002091 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002092 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002093 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002094 char *tbuffer;
2095 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002096
2097 if (unlikely(tracing_selftest_running || tracing_disabled))
2098 return 0;
2099
2100 /* Don't pollute graph traces with trace_vprintk internals */
2101 pause_graph_tracing();
2102
2103 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04002104 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002105
Steven Rostedt07d777f2011-09-22 14:01:55 -04002106 tbuffer = get_trace_buf();
2107 if (!tbuffer) {
2108 len = 0;
2109 goto out;
2110 }
2111
2112 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2113
2114 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002115 goto out;
2116
Steven Rostedt07d777f2011-09-22 14:01:55 -04002117 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002118 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002119 buffer = tr->trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04002120 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2121 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002122 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002123 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002124 entry = ring_buffer_event_data(event);
2125 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002126 entry->fmt = fmt;
2127
Steven Rostedt07d777f2011-09-22 14:01:55 -04002128 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05002129 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002130 __buffer_unlock_commit(buffer, event);
Steven Rostedtd9313692010-01-06 17:27:11 -05002131 ftrace_trace_stack(buffer, flags, 6, pc);
2132 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002133
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002134out:
Steven Rostedt5168ae52010-06-03 09:36:50 -04002135 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002136 unpause_graph_tracing();
2137
2138 return len;
2139}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002140EXPORT_SYMBOL_GPL(trace_vbprintk);
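/*
 * Illustrative usage: trace_vbprintk() is normally reached via the
 * trace_printk() macro, e.g.
 *
 *	trace_printk("request %d completed in %llu ns\n", id, delta);
 *
 * Only the format pointer and binary arguments are stored, which keeps
 * this path cheaper than the string-copying trace_vprintk() below.
 */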
2141
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002142static int
2143__trace_array_vprintk(struct ring_buffer *buffer,
2144 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002145{
Tom Zanussie1112b42009-03-31 00:48:49 -05002146 struct ftrace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002147 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002148 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002149 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002150 unsigned long flags;
2151 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002152
2153 if (tracing_disabled || tracing_selftest_running)
2154 return 0;
2155
Steven Rostedt07d777f2011-09-22 14:01:55 -04002156 /* Don't pollute graph traces with trace_vprintk internals */
2157 pause_graph_tracing();
2158
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002159 pc = preempt_count();
2160 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002161
Steven Rostedt07d777f2011-09-22 14:01:55 -04002162
2163 tbuffer = get_trace_buf();
2164 if (!tbuffer) {
2165 len = 0;
2166 goto out;
2167 }
2168
2169 len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2170 if (len > TRACE_BUF_SIZE)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002171 goto out;
2172
Steven Rostedt07d777f2011-09-22 14:01:55 -04002173 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002174 size = sizeof(*entry) + len + 1;
Steven Rostedte77405a2009-09-02 14:17:06 -04002175 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
Steven Rostedt07d777f2011-09-22 14:01:55 -04002176 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002177 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002178 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002179 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002180 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002181
Steven Rostedt07d777f2011-09-22 14:01:55 -04002182 memcpy(&entry->buf, tbuffer, len);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002183 entry->buf[len] = '\0';
Tom Zanussif306cc82013-10-24 08:34:17 -05002184 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002185 __buffer_unlock_commit(buffer, event);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002186 ftrace_trace_stack(buffer, flags, 6, pc);
Steven Rostedtd9313692010-01-06 17:27:11 -05002187 }
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002188 out:
2189 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002190 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002191
2192 return len;
2193}
Steven Rostedt659372d2009-09-03 19:11:07 -04002194
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002195int trace_array_vprintk(struct trace_array *tr,
2196 unsigned long ip, const char *fmt, va_list args)
2197{
2198 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2199}
2200
2201int trace_array_printk(struct trace_array *tr,
2202 unsigned long ip, const char *fmt, ...)
2203{
2204 int ret;
2205 va_list ap;
2206
2207 if (!(trace_flags & TRACE_ITER_PRINTK))
2208 return 0;
2209
2210 va_start(ap, fmt);
2211 ret = trace_array_vprintk(tr, ip, fmt, ap);
2212 va_end(ap);
2213 return ret;
2214}
2215
2216int trace_array_printk_buf(struct ring_buffer *buffer,
2217 unsigned long ip, const char *fmt, ...)
2218{
2219 int ret;
2220 va_list ap;
2221
2222 if (!(trace_flags & TRACE_ITER_PRINTK))
2223 return 0;
2224
2225 va_start(ap, fmt);
2226 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2227 va_end(ap);
2228 return ret;
2229}
2230
Steven Rostedt659372d2009-09-03 19:11:07 -04002231int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2232{
Steven Rostedta813a152009-10-09 01:41:35 -04002233 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04002234}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002235EXPORT_SYMBOL_GPL(trace_vprintk);
2236
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002237static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04002238{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002239 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2240
Steven Rostedt5a90f572008-09-03 17:42:51 -04002241 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002242 if (buf_iter)
2243 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04002244}
2245
Ingo Molnare309b412008-05-12 21:20:51 +02002246static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002247peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2248 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002249{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002250 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002251 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002252
Steven Rostedtd7690412008-10-01 00:29:53 -04002253 if (buf_iter)
2254 event = ring_buffer_iter_peek(buf_iter, ts);
2255 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002256 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002257 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04002258
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002259 if (event) {
2260 iter->ent_size = ring_buffer_event_length(event);
2261 return ring_buffer_event_data(event);
2262 }
2263 iter->ent_size = 0;
2264 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002265}
Steven Rostedtd7690412008-10-01 00:29:53 -04002266
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002267static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002268__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2269 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002270{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002271 struct ring_buffer *buffer = iter->trace_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002272 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08002273 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002274 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002275 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002276 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002277 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002278 int cpu;
2279
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002280 /*
2281 * If we are in a per_cpu trace file, don't bother iterating over
2282 * all cpus; just peek at the requested cpu directly.
2283 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002284 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002285 if (ring_buffer_empty_cpu(buffer, cpu_file))
2286 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002287 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002288 if (ent_cpu)
2289 *ent_cpu = cpu_file;
2290
2291 return ent;
2292 }
2293
Steven Rostedtab464282008-05-12 21:21:00 +02002294 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002295
2296 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002297 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002298
Steven Rostedtbc21b472010-03-31 19:49:26 -04002299 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002300
Ingo Molnarcdd31cd2008-05-12 21:20:46 +02002301 /*
2302 * Pick the entry with the smallest timestamp:
2303 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002304 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002305 next = ent;
2306 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002307 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002308 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002309 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002310 }
2311 }
2312
Steven Rostedt12b5da32012-03-27 10:43:28 -04002313 iter->ent_size = next_size;
2314
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002315 if (ent_cpu)
2316 *ent_cpu = next_cpu;
2317
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002318 if (ent_ts)
2319 *ent_ts = next_ts;
2320
Steven Rostedtbc21b472010-03-31 19:49:26 -04002321 if (missing_events)
2322 *missing_events = next_lost;
2323
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002324 return next;
2325}
2326
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002327/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002328struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2329 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002330{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002331 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002332}
Ingo Molnar8c523a92008-05-12 21:20:46 +02002333
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002334/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05002335void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002336{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002337 iter->ent = __find_next_entry(iter, &iter->cpu,
2338 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02002339
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002340 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002341 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002342
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002343 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02002344}
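/*
 * Illustrative sketch: a consuming reader (the pipe read path, roughly)
 * drains entries in timestamp order with
 *
 *	while (trace_find_next_entry_inc(iter)) {
 *		print_trace_line(iter);
 *		trace_consume(iter);
 *	}
 *
 * where trace_consume() (below) discards the entry just emitted.
 */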
2345
Ingo Molnare309b412008-05-12 21:20:51 +02002346static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002347{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002348 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002349 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002350}
2351
Ingo Molnare309b412008-05-12 21:20:51 +02002352static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002353{
2354 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002355 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002356 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002357
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002358 WARN_ON_ONCE(iter->leftover);
2359
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002360 (*pos)++;
2361
2362 /* can't go backwards */
2363 if (iter->idx > i)
2364 return NULL;
2365
2366 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05002367 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002368 else
2369 ent = iter;
2370
2371 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05002372 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002373
2374 iter->pos = *pos;
2375
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002376 return ent;
2377}
2378
Jason Wessel955b61e2010-08-05 09:22:23 -05002379void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002380{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002381 struct ring_buffer_event *event;
2382 struct ring_buffer_iter *buf_iter;
2383 unsigned long entries = 0;
2384 u64 ts;
2385
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002386 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002387
Steven Rostedt6d158a82012-06-27 20:46:14 -04002388 buf_iter = trace_buffer_iter(iter, cpu);
2389 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002390 return;
2391
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002392 ring_buffer_iter_reset(buf_iter);
2393
2394 /*
2395 * We could have the case with the max latency tracers
2396 * that a reset never took place on a cpu. This is evident
2397 * by the timestamp being before the start of the buffer.
2398 */
2399 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002400 if (ts >= iter->trace_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002401 break;
2402 entries++;
2403 ring_buffer_read(buf_iter, NULL);
2404 }
2405
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002406 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002407}
2408
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002409/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002410 * The current tracer is copied to avoid global locking
2411 * all around.
2412 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002413static void *s_start(struct seq_file *m, loff_t *pos)
2414{
2415 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002416 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002417 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002418 void *p = NULL;
2419 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002420 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002421
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09002422 /*
2423 * copy the tracer to avoid using a global lock all around.
2424 * iter->trace is a copy of current_trace, the pointer to the
2425 * name may be used instead of a strcmp(), as iter->trace->name
2426 * will point to the same string as current_trace->name.
2427 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002428 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002429 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2430 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002431 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002432
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002433#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002434 if (iter->snapshot && iter->trace->use_max_tr)
2435 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002436#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002437
2438 if (!iter->snapshot)
2439 atomic_inc(&trace_record_cmdline_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002440
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002441 if (*pos != iter->pos) {
2442 iter->ent = NULL;
2443 iter->cpu = 0;
2444 iter->idx = -1;
2445
Steven Rostedtae3b5092013-01-23 15:22:59 -05002446 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002447 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002448 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002449 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002450 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002451
Lai Jiangshanac91d852010-03-02 17:54:50 +08002452 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002453 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2454 ;
2455
2456 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002457 /*
2458 * If we overflowed the seq_file before, then we want
2459 * to just reuse the trace_seq buffer again.
2460 */
2461 if (iter->leftover)
2462 p = iter;
2463 else {
2464 l = *pos - 1;
2465 p = s_next(m, p, &l);
2466 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002467 }
2468
Lai Jiangshan4f535962009-05-18 19:35:34 +08002469 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002470 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002471 return p;
2472}
2473
2474static void s_stop(struct seq_file *m, void *p)
2475{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002476 struct trace_iterator *iter = m->private;
2477
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002478#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002479 if (iter->snapshot && iter->trace->use_max_tr)
2480 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002481#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002482
2483 if (!iter->snapshot)
2484 atomic_dec(&trace_record_cmdline_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002485
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002486 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08002487 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002488}
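/*
 * Illustrative sketch: s_start/s_next/s_stop form the iterator half of a
 * seq_operations table (paired with an s_show defined later in this file):
 *
 *	static const struct seq_operations tracer_seq_ops = {
 *		.start		= s_start,
 *		.next		= s_next,
 *		.stop		= s_stop,
 *		.show		= s_show,
 *	};
 */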
2489
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002490static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002491get_total_entries(struct trace_buffer *buf,
2492 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002493{
2494 unsigned long count;
2495 int cpu;
2496
2497 *total = 0;
2498 *entries = 0;
2499
2500 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002501 count = ring_buffer_entries_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002502 /*
2503 * If this buffer has skipped entries, then we hold all
2504 * entries for the trace and we need to ignore the
2505 * ones before the time stamp.
2506 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002507 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2508 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002509 /* total is the same as the entries */
2510 *total += count;
2511 } else
2512 *total += count +
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002513 ring_buffer_overrun_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002514 *entries += count;
2515 }
2516}
2517
Ingo Molnare309b412008-05-12 21:20:51 +02002518static void print_lat_help_header(struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002519{
Michael Ellermana6168352008-08-20 16:36:11 -07002520 seq_puts(m, "# _------=> CPU# \n");
2521 seq_puts(m, "# / _-----=> irqs-off \n");
2522 seq_puts(m, "# | / _----=> need-resched \n");
2523 seq_puts(m, "# || / _---=> hardirq/softirq \n");
2524 seq_puts(m, "# ||| / _--=> preempt-depth \n");
Steven Rostedte6e1e252011-03-09 10:41:56 -05002525 seq_puts(m, "# |||| / delay \n");
2526 seq_puts(m, "# cmd pid ||||| time | caller \n");
2527 seq_puts(m, "# \\ / ||||| \\ | / \n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002528}
2529
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002530static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002531{
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002532 unsigned long total;
2533 unsigned long entries;
2534
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002535 get_total_entries(buf, &total, &entries);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002536 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2537 entries, total, num_online_cpus());
2538 seq_puts(m, "#\n");
2539}
2540
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002541static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002542{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002543 print_event_info(buf, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002544 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
Michael Ellermana6168352008-08-20 16:36:11 -07002545 seq_puts(m, "# | | | | |\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002546}
2547
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002548static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt77271ce2011-11-17 09:34:33 -05002549{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002550 print_event_info(buf, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002551 seq_puts(m, "# _-----=> irqs-off\n");
2552 seq_puts(m, "# / _----=> need-resched\n");
2553 seq_puts(m, "# | / _---=> hardirq/softirq\n");
2554 seq_puts(m, "# || / _--=> preempt-depth\n");
2555 seq_puts(m, "# ||| / delay\n");
2556 seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n");
2557 seq_puts(m, "# | | | |||| | |\n");
2558}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002559
Jiri Olsa62b915f2010-04-02 19:01:22 +02002560void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002561print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2562{
2563 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002564 struct trace_buffer *buf = iter->trace_buffer;
2565 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002566 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002567 unsigned long entries;
2568 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002569 const char *name = "preemption";
2570
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05002571 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002572
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002573 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002574
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002575 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002576 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002577 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002578 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002579 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002580 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02002581 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002582 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02002583 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002584 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002585#if defined(CONFIG_PREEMPT_NONE)
2586 "server",
2587#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2588 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04002589#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002590 "preempt",
2591#else
2592 "unknown",
2593#endif
2594 /* These are reserved for later use */
2595 0, 0, 0, 0);
2596#ifdef CONFIG_SMP
2597 seq_printf(m, " #P:%d)\n", num_online_cpus());
2598#else
2599 seq_puts(m, ")\n");
2600#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002601 seq_puts(m, "# -----------------\n");
2602 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002603 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07002604 data->comm, data->pid,
2605 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002606 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002607 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002608
2609 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002610 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002611 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2612 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002613 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002614 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2615 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04002616 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002617 }
2618
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002619 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002620}
2621
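/*
 * Descriptive note (added): annotate the output the first time a given
 * CPU's buffer produces an entry, so the reader can tell where each
 * per-cpu buffer begins.  Only done when the annotate option is on,
 * the buffer actually lost events (no skipped_entries), and this is
 * not the very first entry of the trace.
 */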
Steven Rostedta3097202008-11-07 22:36:02 -05002622static void test_cpu_buff_start(struct trace_iterator *iter)
2623{
2624 struct trace_seq *s = &iter->seq;
2625
Steven Rostedt12ef7d42008-11-12 17:52:38 -05002626 if (!(trace_flags & TRACE_ITER_ANNOTATE))
2627 return;
2628
2629 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2630 return;
2631
Rusty Russell44623442009-01-01 10:12:23 +10302632 if (cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05002633 return;
2634
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002635 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002636 return;
2637
Rusty Russell44623442009-01-01 10:12:23 +10302638 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02002639
2640	/* Don't print the "buffer started" annotation for the first entry of the trace */
2641 if (iter->idx > 1)
2642 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2643 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05002644}
2645
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002646static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002647{
Steven Rostedt214023c2008-05-12 21:20:46 +02002648 struct trace_seq *s = &iter->seq;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002649 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002650 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002651 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002652
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002653 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002654
Steven Rostedta3097202008-11-07 22:36:02 -05002655 test_cpu_buff_start(iter);
2656
Steven Rostedtf633cef2008-12-23 23:24:13 -05002657 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002658
2659 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt27d48be2009-03-04 21:57:29 -05002660 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2661 if (!trace_print_lat_context(iter))
2662 goto partial;
2663 } else {
2664 if (!trace_print_context(iter))
2665 goto partial;
2666 }
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002667 }
2668
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002669 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04002670 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002671
2672 if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
2673 goto partial;
Steven Rostedt7104f302008-10-01 10:52:51 -04002674
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002675 return TRACE_TYPE_HANDLED;
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002676partial:
2677 return TRACE_TYPE_PARTIAL_LINE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002678}
2679
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002680static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002681{
2682 struct trace_seq *s = &iter->seq;
2683 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002684 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002685
2686 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002687
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002688 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002689 if (!trace_seq_printf(s, "%d %d %llu ",
2690 entry->pid, iter->cpu, iter->ts))
2691 goto partial;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002692 }
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002693
Steven Rostedtf633cef2008-12-23 23:24:13 -05002694 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002695 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04002696 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002697
2698 if (!trace_seq_printf(s, "%d ?\n", entry->type))
2699 goto partial;
Steven Rostedt7104f302008-10-01 10:52:51 -04002700
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002701 return TRACE_TYPE_HANDLED;
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002702partial:
2703 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002704}
2705
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002706static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002707{
2708 struct trace_seq *s = &iter->seq;
2709 unsigned char newline = '\n';
2710 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002711 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002712
2713 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002714
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002715 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2716 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
2717 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
2718 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
2719 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002720
Steven Rostedtf633cef2008-12-23 23:24:13 -05002721 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002722 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04002723 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002724 if (ret != TRACE_TYPE_HANDLED)
2725 return ret;
2726 }
Steven Rostedt7104f302008-10-01 10:52:51 -04002727
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002728 SEQ_PUT_FIELD_RET(s, newline);
2729
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002730 return TRACE_TYPE_HANDLED;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002731}
2732
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002733static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002734{
2735 struct trace_seq *s = &iter->seq;
2736 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002737 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002738
2739 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002740
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002741 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2742 SEQ_PUT_FIELD_RET(s, entry->pid);
Steven Rostedt1830b52d2009-02-07 19:38:43 -05002743 SEQ_PUT_FIELD_RET(s, iter->cpu);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002744 SEQ_PUT_FIELD_RET(s, iter->ts);
2745 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002746
Steven Rostedtf633cef2008-12-23 23:24:13 -05002747 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04002748 return event ? event->funcs->binary(iter, 0, event) :
2749 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002750}
2751
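/*
 * Descriptive note (added): return 1 when there is nothing left to
 * read, checking either the one CPU the iterator is bound to or every
 * tracing CPU.  Use the per-cpu ring_buffer_iter when one exists,
 * otherwise ask the ring buffer directly.
 */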
Jiri Olsa62b915f2010-04-02 19:01:22 +02002752int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002753{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002754 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002755 int cpu;
2756
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002757 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002758 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002759 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002760 buf_iter = trace_buffer_iter(iter, cpu);
2761 if (buf_iter) {
2762 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002763 return 0;
2764 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002765 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002766 return 0;
2767 }
2768 return 1;
2769 }
2770
Steven Rostedtab464282008-05-12 21:21:00 +02002771 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04002772 buf_iter = trace_buffer_iter(iter, cpu);
2773 if (buf_iter) {
2774 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04002775 return 0;
2776 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002777 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04002778 return 0;
2779 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002780 }
Steven Rostedtd7690412008-10-01 00:29:53 -04002781
Frederic Weisbecker797d3712008-09-30 18:13:45 +02002782 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002783}
2784
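/*
 * Descriptive note (added): pick the output format for one trace entry.
 * Lost-event markers come first, then the tracer's own print_line()
 * callback, then the printk-style msg-only shortcuts, and finally the
 * bin/hex/raw/default formats selected by the trace flags, in that
 * order of precedence.
 */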
Lai Jiangshan4f535962009-05-18 19:35:34 +08002785/* Called with trace_event_read_lock() held. */
Jason Wessel955b61e2010-08-05 09:22:23 -05002786enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002787{
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002788 enum print_line_t ret;
2789
Jiri Olsaee5e51f2011-03-25 12:05:18 +01002790 if (iter->lost_events &&
2791 !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2792 iter->cpu, iter->lost_events))
2793 return TRACE_TYPE_PARTIAL_LINE;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002794
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002795 if (iter->trace && iter->trace->print_line) {
2796 ret = iter->trace->print_line(iter);
2797 if (ret != TRACE_TYPE_UNHANDLED)
2798 return ret;
2799 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02002800
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05002801 if (iter->ent->type == TRACE_BPUTS &&
2802 trace_flags & TRACE_ITER_PRINTK &&
2803 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2804 return trace_print_bputs_msg_only(iter);
2805
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002806 if (iter->ent->type == TRACE_BPRINT &&
2807 trace_flags & TRACE_ITER_PRINTK &&
2808 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04002809 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002810
Frederic Weisbecker66896a82008-12-13 20:18:13 +01002811 if (iter->ent->type == TRACE_PRINT &&
2812 trace_flags & TRACE_ITER_PRINTK &&
2813 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04002814 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01002815
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002816 if (trace_flags & TRACE_ITER_BIN)
2817 return print_bin_fmt(iter);
2818
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002819 if (trace_flags & TRACE_ITER_HEX)
2820 return print_hex_fmt(iter);
2821
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002822 if (trace_flags & TRACE_ITER_RAW)
2823 return print_raw_fmt(iter);
2824
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002825 return print_trace_fmt(iter);
2826}
2827
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01002828void trace_latency_header(struct seq_file *m)
2829{
2830 struct trace_iterator *iter = m->private;
2831
2832 /* print nothing if the buffers are empty */
2833 if (trace_empty(iter))
2834 return;
2835
2836 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2837 print_trace_header(m, iter);
2838
2839 if (!(trace_flags & TRACE_ITER_VERBOSE))
2840 print_lat_help_header(m);
2841}
2842
Jiri Olsa62b915f2010-04-02 19:01:22 +02002843void trace_default_header(struct seq_file *m)
2844{
2845 struct trace_iterator *iter = m->private;
2846
Jiri Olsaf56e7f82011-06-03 16:58:49 +02002847 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2848 return;
2849
Jiri Olsa62b915f2010-04-02 19:01:22 +02002850 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2851 /* print nothing if the buffers are empty */
2852 if (trace_empty(iter))
2853 return;
2854 print_trace_header(m, iter);
2855 if (!(trace_flags & TRACE_ITER_VERBOSE))
2856 print_lat_help_header(m);
2857 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05002858 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2859 if (trace_flags & TRACE_ITER_IRQ_INFO)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002860 print_func_help_header_irq(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002861 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002862 print_func_help_header(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002863 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02002864 }
2865}
2866
Steven Rostedte0a413f2011-09-29 21:26:16 -04002867static void test_ftrace_alive(struct seq_file *m)
2868{
2869 if (!ftrace_is_dead())
2870 return;
2871 seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
2872 seq_printf(m, "# MAY BE MISSING FUNCTION EVENTS\n");
2873}
2874
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002875#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002876static void show_snapshot_main_help(struct seq_file *m)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002877{
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002878 seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
2879 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2880 seq_printf(m, "# Takes a snapshot of the main buffer.\n");
Wang YanQingb9be6d02013-09-14 12:59:16 +08002881 seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002882	seq_printf(m, "# (Doesn't have to be '2'; works with any number that\n");
2883	seq_printf(m, "# is not '0' or '1')\n");
2884}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002885
2886static void show_snapshot_percpu_help(struct seq_file *m)
2887{
2888 seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2889#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2890 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2891 seq_printf(m, "# Takes a snapshot of the main buffer for this cpu.\n");
2892#else
2893 seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
2894 seq_printf(m, "# Must use main snapshot file to allocate.\n");
2895#endif
2896 seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
2897 seq_printf(m, "# (Doesn't have to be '2' works with any number that\n");
2898 seq_printf(m, "# is not a '0' or '1')\n");
2899}
2900
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002901static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2902{
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05002903 if (iter->tr->allocated_snapshot)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002904 seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
2905 else
2906 seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
2907
2908 seq_printf(m, "# Snapshot commands:\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002909 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2910 show_snapshot_main_help(m);
2911 else
2912 show_snapshot_percpu_help(m);
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002913}
2914#else
2915/* Should never be called */
2916static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2917#endif
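/*
 * Typical snapshot usage from user space (illustrative; assumes
 * debugfs is mounted at /sys/kernel/debug):
 *
 *   echo 1 > /sys/kernel/debug/tracing/snapshot	# allocate and take a snapshot
 *   cat /sys/kernel/debug/tracing/snapshot		# read it back
 *   echo 0 > /sys/kernel/debug/tracing/snapshot	# clear and free the buffer
 */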
2918
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002919static int s_show(struct seq_file *m, void *v)
2920{
2921 struct trace_iterator *iter = v;
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002922 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002923
2924 if (iter->ent == NULL) {
2925 if (iter->tr) {
2926 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2927 seq_puts(m, "#\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04002928 test_ftrace_alive(m);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002929 }
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002930 if (iter->snapshot && trace_empty(iter))
2931 print_snapshot_help(m, iter);
2932 else if (iter->trace && iter->trace->print_header)
Markus Metzger8bba1bf2008-11-25 09:12:31 +01002933 iter->trace->print_header(m);
Jiri Olsa62b915f2010-04-02 19:01:22 +02002934 else
2935 trace_default_header(m);
2936
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002937 } else if (iter->leftover) {
2938 /*
2939 * If we filled the seq_file buffer earlier, we
2940 * want to just show it now.
2941 */
2942 ret = trace_print_seq(m, &iter->seq);
2943
2944 /* ret should this time be zero, but you never know */
2945 iter->leftover = ret;
2946
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002947 } else {
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002948 print_trace_line(iter);
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002949 ret = trace_print_seq(m, &iter->seq);
2950 /*
2951 * If we overflow the seq_file buffer, then it will
2952 * ask us for this data again at start up.
2953 * Use that instead.
2954 * ret is 0 if seq_file write succeeded.
2955 * -1 otherwise.
2956 */
2957 iter->leftover = ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002958 }
2959
2960 return 0;
2961}
2962
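/*
 * Descriptive note (added): per-cpu files encode their CPU number in
 * inode->i_cdev as (cpu + 1) (see trace_create_cpu_file()), so a NULL
 * i_cdev means the file is not bound to a single CPU.
 */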
Oleg Nesterov649e9c72013-07-23 17:25:54 +02002963/*
2964 * Should be used after trace_array_get(), trace_types_lock
2965 * ensures that i_cdev was already initialized.
2966 */
2967static inline int tracing_get_cpu(struct inode *inode)
2968{
2969 if (inode->i_cdev) /* See trace_create_cpu_file() */
2970 return (long)inode->i_cdev - 1;
2971 return RING_BUFFER_ALL_CPUS;
2972}
2973
James Morris88e9d342009-09-22 16:43:43 -07002974static const struct seq_operations tracer_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02002975 .start = s_start,
2976 .next = s_next,
2977 .stop = s_stop,
2978 .show = s_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002979};
2980
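/*
 * Descriptive note (added): prepare a trace_iterator for reading the
 * trace.  The current tracer is copied so concurrent tracer changes
 * cannot confuse the reader, ring buffer iterators are set up for the
 * requested CPU(s), and tracing is stopped for the duration unless
 * "snapshot" was opened.
 */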
Ingo Molnare309b412008-05-12 21:20:51 +02002981static struct trace_iterator *
Oleg Nesterov6484c712013-07-23 17:26:10 +02002982__tracing_open(struct inode *inode, struct file *file, bool snapshot)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002983{
Oleg Nesterov6484c712013-07-23 17:26:10 +02002984 struct trace_array *tr = inode->i_private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002985 struct trace_iterator *iter;
Jiri Olsa50e18b92012-04-25 10:23:39 +02002986 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002987
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002988 if (tracing_disabled)
2989 return ERR_PTR(-ENODEV);
Steven Rostedt60a11772008-05-12 21:20:44 +02002990
Jiri Olsa50e18b92012-04-25 10:23:39 +02002991 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002992 if (!iter)
2993 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002994
Steven Rostedt6d158a82012-06-27 20:46:14 -04002995 iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
2996 GFP_KERNEL);
Dan Carpenter93574fc2012-07-11 09:35:08 +03002997 if (!iter->buffer_iter)
2998 goto release;
2999
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003000 /*
3001 * We make a copy of the current tracer to avoid concurrent
3002 * changes on it while we are reading.
3003 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003004 mutex_lock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003005 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003006 if (!iter->trace)
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003007 goto fail;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003008
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003009 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003010
Li Zefan79f55992009-06-15 14:58:26 +08003011 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003012 goto fail;
3013
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003014 iter->tr = tr;
3015
3016#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003017 /* Currently only the top directory has a snapshot */
3018 if (tr->current_trace->print_max || snapshot)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003019 iter->trace_buffer = &tr->max_buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003020 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003021#endif
3022 iter->trace_buffer = &tr->trace_buffer;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003023 iter->snapshot = snapshot;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003024 iter->pos = -1;
Oleg Nesterov6484c712013-07-23 17:26:10 +02003025 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003026 mutex_init(&iter->mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003027
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003028 /* Notify the tracer early; before we stop tracing. */
3029 if (iter->trace && iter->trace->open)
Markus Metzgera93751c2008-12-11 13:53:26 +01003030 iter->trace->open(iter);
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003031
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003032 /* Annotate start of buffers if we had overruns */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003033 if (ring_buffer_overruns(iter->trace_buffer->buffer))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003034 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3035
David Sharp8be07092012-11-13 12:18:22 -08003036 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09003037 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08003038 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3039
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003040 /* stop the trace while dumping if we are not opening "snapshot" */
3041 if (!iter->snapshot)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003042 tracing_stop_tr(tr);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003043
Steven Rostedtae3b5092013-01-23 15:22:59 -05003044 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003045 for_each_tracing_cpu(cpu) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003046 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003047 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07003048 }
3049 ring_buffer_read_prepare_sync();
3050 for_each_tracing_cpu(cpu) {
3051 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003052 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003053 }
3054 } else {
3055 cpu = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003056 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003057 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07003058 ring_buffer_read_prepare_sync();
3059 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003060 tracing_iter_reset(iter, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003061 }
3062
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003063 mutex_unlock(&trace_types_lock);
3064
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003065 return iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003066
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003067 fail:
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003068 mutex_unlock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003069 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003070 kfree(iter->buffer_iter);
Dan Carpenter93574fc2012-07-11 09:35:08 +03003071release:
Jiri Olsa50e18b92012-04-25 10:23:39 +02003072 seq_release_private(inode, file);
3073 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003074}
3075
3076int tracing_open_generic(struct inode *inode, struct file *filp)
3077{
Steven Rostedt60a11772008-05-12 21:20:44 +02003078 if (tracing_disabled)
3079 return -ENODEV;
3080
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003081 filp->private_data = inode->i_private;
3082 return 0;
3083}
3084
Geyslan G. Bem2e864212013-10-18 21:15:54 -03003085bool tracing_is_disabled(void)
3086{
3087	return tracing_disabled ? true : false;
3088}
3089
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003090/*
3091 * Open and update trace_array ref count.
3092 * Must have the current trace_array passed to it.
3093 */
Steven Rostedt (Red Hat)dcc30222013-07-02 20:30:52 -04003094static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003095{
3096 struct trace_array *tr = inode->i_private;
3097
3098 if (tracing_disabled)
3099 return -ENODEV;
3100
3101 if (trace_array_get(tr) < 0)
3102 return -ENODEV;
3103
3104 filp->private_data = inode->i_private;
3105
3106 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003107}
3108
Hannes Eder4fd27352009-02-10 19:44:12 +01003109static int tracing_release(struct inode *inode, struct file *file)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003110{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003111 struct trace_array *tr = inode->i_private;
matt mooney907f2782010-09-27 19:04:53 -07003112 struct seq_file *m = file->private_data;
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003113 struct trace_iterator *iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003114 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003115
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003116 if (!(file->f_mode & FMODE_READ)) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003117 trace_array_put(tr);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003118 return 0;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003119 }
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003120
Oleg Nesterov6484c712013-07-23 17:26:10 +02003121 /* Writes do not use seq_file */
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003122 iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003123 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05003124
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003125 for_each_tracing_cpu(cpu) {
3126 if (iter->buffer_iter[cpu])
3127 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3128 }
3129
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003130 if (iter->trace && iter->trace->close)
3131 iter->trace->close(iter);
3132
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003133 if (!iter->snapshot)
3134 /* reenable tracing if it was previously enabled */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003135 tracing_start_tr(tr);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003136
3137 __trace_array_put(tr);
3138
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003139 mutex_unlock(&trace_types_lock);
3140
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003141 mutex_destroy(&iter->mutex);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003142 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003143 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003144 kfree(iter->buffer_iter);
Jiri Olsa50e18b92012-04-25 10:23:39 +02003145 seq_release_private(inode, file);
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003146
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003147 return 0;
3148}
3149
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003150static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3151{
3152 struct trace_array *tr = inode->i_private;
3153
3154 trace_array_put(tr);
3155 return 0;
3156}
3157
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003158static int tracing_single_release_tr(struct inode *inode, struct file *file)
3159{
3160 struct trace_array *tr = inode->i_private;
3161
3162 trace_array_put(tr);
3163
3164 return single_release(inode, file);
3165}
3166
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003167static int tracing_open(struct inode *inode, struct file *file)
3168{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003169 struct trace_array *tr = inode->i_private;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003170 struct trace_iterator *iter;
3171 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003172
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003173 if (trace_array_get(tr) < 0)
3174 return -ENODEV;
3175
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003176 /* If this file was open for write, then erase contents */
Oleg Nesterov6484c712013-07-23 17:26:10 +02003177 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3178 int cpu = tracing_get_cpu(inode);
3179
3180 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003181 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003182 else
Oleg Nesterov6484c712013-07-23 17:26:10 +02003183 tracing_reset(&tr->trace_buffer, cpu);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003184 }
3185
3186 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003187 iter = __tracing_open(inode, file, false);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003188 if (IS_ERR(iter))
3189 ret = PTR_ERR(iter);
3190 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
3191 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3192 }
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003193
3194 if (ret < 0)
3195 trace_array_put(tr);
3196
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003197 return ret;
3198}
3199
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003200/*
3201 * Some tracers are not suitable for instance buffers.
3202 * A tracer is always available for the global array (toplevel)
3203 * or if it explicitly states that it is.
3204 */
3205static bool
3206trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3207{
3208 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3209}
3210
3211/* Find the next tracer that this trace array may use */
3212static struct tracer *
3213get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3214{
3215 while (t && !trace_ok_for_array(t, tr))
3216 t = t->next;
3217
3218 return t;
3219}
3220
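/*
 * Descriptive note (added): seq_file iteration over the registered
 * tracers for the available_tracers file, skipping tracers this
 * instance may not use.
 */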
Ingo Molnare309b412008-05-12 21:20:51 +02003221static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003222t_next(struct seq_file *m, void *v, loff_t *pos)
3223{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003224 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003225 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003226
3227 (*pos)++;
3228
3229 if (t)
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003230 t = get_tracer_for_array(tr, t->next);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003231
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003232 return t;
3233}
3234
3235static void *t_start(struct seq_file *m, loff_t *pos)
3236{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003237 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003238 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003239 loff_t l = 0;
3240
3241 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003242
3243 t = get_tracer_for_array(tr, trace_types);
3244 for (; t && l < *pos; t = t_next(m, t, &l))
3245 ;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003246
3247 return t;
3248}
3249
3250static void t_stop(struct seq_file *m, void *p)
3251{
3252 mutex_unlock(&trace_types_lock);
3253}
3254
3255static int t_show(struct seq_file *m, void *v)
3256{
3257 struct tracer *t = v;
3258
3259 if (!t)
3260 return 0;
3261
3262 seq_printf(m, "%s", t->name);
3263 if (t->next)
3264 seq_putc(m, ' ');
3265 else
3266 seq_putc(m, '\n');
3267
3268 return 0;
3269}
3270
James Morris88e9d342009-09-22 16:43:43 -07003271static const struct seq_operations show_traces_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003272 .start = t_start,
3273 .next = t_next,
3274 .stop = t_stop,
3275 .show = t_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003276};
3277
3278static int show_traces_open(struct inode *inode, struct file *file)
3279{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003280 struct trace_array *tr = inode->i_private;
3281 struct seq_file *m;
3282 int ret;
3283
Steven Rostedt60a11772008-05-12 21:20:44 +02003284 if (tracing_disabled)
3285 return -ENODEV;
3286
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003287 ret = seq_open(file, &show_traces_seq_ops);
3288 if (ret)
3289 return ret;
3290
3291 m = file->private_data;
3292 m->private = tr;
3293
3294 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003295}
3296
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003297static ssize_t
3298tracing_write_stub(struct file *filp, const char __user *ubuf,
3299 size_t count, loff_t *ppos)
3300{
3301 return count;
3302}
3303
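/*
 * Descriptive note (added): only reads go through seq_file, so only
 * they can seek; a write-only open just has its file position reset
 * to zero.
 */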
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003304loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08003305{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003306 int ret;
3307
Slava Pestov364829b2010-11-24 15:13:16 -08003308 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003309 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08003310 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003311 file->f_pos = ret = 0;
3312
3313 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08003314}
3315
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003316static const struct file_operations tracing_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003317 .open = tracing_open,
3318 .read = seq_read,
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003319 .write = tracing_write_stub,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003320 .llseek = tracing_lseek,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003321 .release = tracing_release,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003322};
3323
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003324static const struct file_operations show_traces_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003325 .open = show_traces_open,
3326 .read = seq_read,
3327 .release = seq_release,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003328 .llseek = seq_lseek,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003329};
3330
Ingo Molnar36dfe922008-05-12 21:20:52 +02003331/*
Ingo Molnar36dfe922008-05-12 21:20:52 +02003332 * The tracer itself will not take this lock, but still we want
3333 * to provide a consistent cpumask to user-space:
3334 */
3335static DEFINE_MUTEX(tracing_cpumask_update_lock);
3336
3337/*
3338 * Temporary storage for the character representation of the
3339 * CPU bitmask (and one more byte for the newline):
3340 */
3341static char mask_str[NR_CPUS + 1];
3342
Ingo Molnarc7078de2008-05-12 21:20:52 +02003343static ssize_t
3344tracing_cpumask_read(struct file *filp, char __user *ubuf,
3345 size_t count, loff_t *ppos)
3346{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003347 struct trace_array *tr = file_inode(filp)->i_private;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003348 int len;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003349
3350 mutex_lock(&tracing_cpumask_update_lock);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003351
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003352 len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003353 if (count - len < 2) {
3354 count = -EINVAL;
3355 goto out_err;
3356 }
3357 len += sprintf(mask_str + len, "\n");
3358 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3359
3360out_err:
Ingo Molnarc7078de2008-05-12 21:20:52 +02003361 mutex_unlock(&tracing_cpumask_update_lock);
3362
3363 return count;
3364}
3365
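/*
 * Descriptive note (added): changing tracing_cpumask must not race
 * with recording.  For each CPU whose bit flips, adjust its 'disabled'
 * count and toggle ring buffer recording under tr->max_lock with
 * interrupts off, then publish the new mask.
 */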
3366static ssize_t
3367tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3368 size_t count, loff_t *ppos)
3369{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003370 struct trace_array *tr = file_inode(filp)->i_private;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303371 cpumask_var_t tracing_cpumask_new;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003372 int err, cpu;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303373
3374 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3375 return -ENOMEM;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003376
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303377 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003378 if (err)
3379 goto err_unlock;
3380
Li Zefan215368e2009-06-15 10:56:42 +08003381 mutex_lock(&tracing_cpumask_update_lock);
3382
Steven Rostedta5e25882008-12-02 15:34:05 -05003383 local_irq_disable();
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05003384 arch_spin_lock(&tr->max_lock);
Steven Rostedtab464282008-05-12 21:21:00 +02003385 for_each_tracing_cpu(cpu) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02003386 /*
3387 * Increase/decrease the disabled counter if we are
3388 * about to flip a bit in the cpumask:
3389 */
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003390 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303391 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003392 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3393 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003394 }
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003395 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303396 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003397 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3398 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003399 }
3400 }
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05003401 arch_spin_unlock(&tr->max_lock);
Steven Rostedta5e25882008-12-02 15:34:05 -05003402 local_irq_enable();
Ingo Molnar36dfe922008-05-12 21:20:52 +02003403
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003404 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003405
Ingo Molnarc7078de2008-05-12 21:20:52 +02003406 mutex_unlock(&tracing_cpumask_update_lock);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303407 free_cpumask_var(tracing_cpumask_new);
Ingo Molnarc7078de2008-05-12 21:20:52 +02003408
Ingo Molnarc7078de2008-05-12 21:20:52 +02003409 return count;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003410
3411err_unlock:
Li Zefan215368e2009-06-15 10:56:42 +08003412 free_cpumask_var(tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003413
3414 return err;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003415}
3416
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003417static const struct file_operations tracing_cpumask_fops = {
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003418 .open = tracing_open_generic_tr,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003419 .read = tracing_cpumask_read,
3420 .write = tracing_cpumask_write,
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003421 .release = tracing_release_generic_tr,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003422 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003423};
3424
Li Zefanfdb372e2009-12-08 11:15:59 +08003425static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003426{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003427 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003428 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003429 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003430 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003431
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003432 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003433 tracer_flags = tr->current_trace->flags->val;
3434 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003435
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003436 for (i = 0; trace_options[i]; i++) {
3437 if (trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08003438 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003439 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003440 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003441 }
3442
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003443 for (i = 0; trace_opts[i].name; i++) {
3444 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08003445 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003446 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003447 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003448 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003449 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003450
Li Zefanfdb372e2009-12-08 11:15:59 +08003451 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003452}
3453
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003454static int __set_tracer_option(struct trace_array *tr,
Li Zefan8d18eaa2009-12-08 11:17:06 +08003455 struct tracer_flags *tracer_flags,
3456 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003457{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003458 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003459 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003460
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003461 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003462 if (ret)
3463 return ret;
3464
3465 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08003466 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003467 else
Zhaolei77708412009-08-07 18:53:21 +08003468 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003469 return 0;
3470}
3471
Li Zefan8d18eaa2009-12-08 11:17:06 +08003472/* Try to assign a tracer specific option */
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003473static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
Li Zefan8d18eaa2009-12-08 11:17:06 +08003474{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003475 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003476 struct tracer_flags *tracer_flags = trace->flags;
3477 struct tracer_opt *opts = NULL;
3478 int i;
3479
3480 for (i = 0; tracer_flags->opts[i].name; i++) {
3481 opts = &tracer_flags->opts[i];
3482
3483 if (strcmp(cmp, opts->name) == 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003484 return __set_tracer_option(tr, trace->flags, opts, neg);
Li Zefan8d18eaa2009-12-08 11:17:06 +08003485 }
3486
3487 return -EINVAL;
3488}
3489
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003490/* Some tracers require overwrite to stay enabled */
3491int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3492{
3493 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3494 return -1;
3495
3496 return 0;
3497}
3498
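/*
 * Descriptive note (added): central switch for a trace flag.  The
 * current tracer may veto the change via its flag_changed() callback,
 * and a few flags carry side effects: overwrite mode is pushed into
 * the ring buffer(s), and cmdline recording and trace_printk tracking
 * follow their flags.
 */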
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003499int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003500{
3501 /* do nothing if flag is already set */
3502 if (!!(trace_flags & mask) == !!enabled)
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003503 return 0;
3504
3505 /* Give the tracer a chance to approve the change */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003506 if (tr->current_trace->flag_changed)
Steven Rostedt (Red Hat)bf6065b2014-01-10 17:51:01 -05003507 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003508 return -EINVAL;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003509
3510 if (enabled)
3511 trace_flags |= mask;
3512 else
3513 trace_flags &= ~mask;
Li Zefane870e9a2010-07-02 11:07:32 +08003514
3515 if (mask == TRACE_ITER_RECORD_CMD)
3516 trace_event_enable_cmd_record(enabled);
David Sharp750912f2010-12-08 13:46:47 -08003517
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003518 if (mask == TRACE_ITER_OVERWRITE) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003519 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003520#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003521 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003522#endif
3523 }
Steven Rostedt81698832012-10-11 10:15:05 -04003524
3525 if (mask == TRACE_ITER_PRINTK)
3526 trace_printk_start_stop_comm(enabled);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003527
3528 return 0;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003529}
3530
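/*
 * Descriptive note (added): parse a single option written to
 * trace_options.  A leading "no" clears the flag instead of setting
 * it, e.g. (illustrative):
 *
 *   echo print-parent   > trace_options	# set
 *   echo noprint-parent > trace_options	# clear
 *
 * Generic flags are tried first, then tracer-specific options.
 */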
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003531static int trace_set_options(struct trace_array *tr, char *option)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003532{
Li Zefan8d18eaa2009-12-08 11:17:06 +08003533 char *cmp;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003534 int neg = 0;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003535 int ret = -ENODEV;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003536 int i;
3537
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003538 cmp = strstrip(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003539
Li Zefan8d18eaa2009-12-08 11:17:06 +08003540 if (strncmp(cmp, "no", 2) == 0) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003541 neg = 1;
3542 cmp += 2;
3543 }
3544
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003545 mutex_lock(&trace_types_lock);
3546
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003547 for (i = 0; trace_options[i]; i++) {
Li Zefan8d18eaa2009-12-08 11:17:06 +08003548 if (strcmp(cmp, trace_options[i]) == 0) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003549 ret = set_tracer_flag(tr, 1 << i, !neg);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003550 break;
3551 }
3552 }
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003553
3554 /* If no option could be set, test the specific tracer options */
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003555 if (!trace_options[i])
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003556 ret = set_tracer_option(tr, cmp, neg);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003557
3558 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003559
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003560 return ret;
3561}
3562
3563static ssize_t
3564tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3565 size_t cnt, loff_t *ppos)
3566{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003567 struct seq_file *m = filp->private_data;
3568 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003569 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003570 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003571
3572 if (cnt >= sizeof(buf))
3573 return -EINVAL;
3574
3575 if (copy_from_user(&buf, ubuf, cnt))
3576 return -EFAULT;
3577
Steven Rostedta8dd2172013-01-09 20:54:17 -05003578 buf[cnt] = 0;
3579
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003580 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003581 if (ret < 0)
3582 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003583
Jiri Olsacf8517c2009-10-23 19:36:16 -04003584 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003585
3586 return cnt;
3587}
3588
Li Zefanfdb372e2009-12-08 11:15:59 +08003589static int tracing_trace_options_open(struct inode *inode, struct file *file)
3590{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003591 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003592 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003593
Li Zefanfdb372e2009-12-08 11:15:59 +08003594 if (tracing_disabled)
3595 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003596
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003597 if (trace_array_get(tr) < 0)
3598 return -ENODEV;
3599
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003600 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3601 if (ret < 0)
3602 trace_array_put(tr);
3603
3604 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08003605}
3606
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003607static const struct file_operations tracing_iter_fops = {
Li Zefanfdb372e2009-12-08 11:15:59 +08003608 .open = tracing_trace_options_open,
3609 .read = seq_read,
3610 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003611 .release = tracing_single_release_tr,
Steven Rostedtee6bce52008-11-12 17:52:37 -05003612 .write = tracing_trace_options_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003613};
3614
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003615static const char readme_msg[] =
3616 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003617 "# echo 0 > tracing_on : quick way to disable tracing\n"
3618 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3619 " Important files:\n"
3620 " trace\t\t\t- The static contents of the buffer\n"
3621 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3622 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3623 " current_tracer\t- function and latency tracers\n"
3624 " available_tracers\t- list of configured tracers for current_tracer\n"
3625 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3626 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3627 " trace_clock\t\t-change the clock used to order events\n"
3628 " local: Per cpu clock but may not be synced across CPUs\n"
3629 " global: Synced across CPUs but slows tracing down.\n"
3630 " counter: Not a clock, but just an increment\n"
3631 " uptime: Jiffy counter from time of boot\n"
3632 " perf: Same clock that perf events use\n"
3633#ifdef CONFIG_X86_64
3634 " x86-tsc: TSC cycle counter\n"
3635#endif
3636 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3637 " tracing_cpumask\t- Limit which CPUs to trace\n"
3638 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3639 "\t\t\t Remove sub-buffer with rmdir\n"
3640 " trace_options\t\t- Set format or modify how tracing happens\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003641	"\t\t\t Disable an option by prefixing 'no' to the\n"
3642 "\t\t\t option name\n"
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003643	"  saved_cmdlines_size\t- echo a number in here to set how many comm-pid pairs to save\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003644#ifdef CONFIG_DYNAMIC_FTRACE
3645 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003646 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3647 "\t\t\t functions\n"
3648 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3649 "\t modules: Can select a group via module\n"
3650 "\t Format: :mod:<module-name>\n"
3651 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3652 "\t triggers: a command to perform when function is hit\n"
3653 "\t Format: <function>:<trigger>[:count]\n"
3654 "\t trigger: traceon, traceoff\n"
3655 "\t\t enable_event:<system>:<event>\n"
3656 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003657#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003658 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003659#endif
3660#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003661 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003662#endif
Steven Rostedt (Red Hat)17a280e2014-04-10 22:43:37 -04003663 "\t\t dump\n"
3664 "\t\t cpudump\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003665 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3666 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3667 "\t The first one will disable tracing every time do_fault is hit\n"
3668 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3669 "\t The first time do trap is hit and it disables tracing, the\n"
3670 "\t counter will decrement to 2. If tracing is already disabled,\n"
3671 "\t the counter will not decrement. It only decrements when the\n"
3672 "\t trigger did work\n"
3673 "\t To remove trigger without count:\n"
3674 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3675 "\t To remove trigger with a count:\n"
3676 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003677 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003678 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3679 "\t modules: Can select a group via module command :mod:\n"
3680 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003681#endif /* CONFIG_DYNAMIC_FTRACE */
3682#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003683 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3684 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003685#endif
3686#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3687 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3688 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3689#endif
3690#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003691 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3692 "\t\t\t snapshot buffer. Read the contents for more\n"
3693 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003694#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003695#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003696 " stack_trace\t\t- Shows the max stack trace when active\n"
3697 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003698 "\t\t\t Write into this file to reset the max size (trigger a\n"
3699 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003700#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003701 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3702 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003703#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003704#endif /* CONFIG_STACK_TRACER */
Tom Zanussi26f25562014-01-17 15:11:44 -06003705 " events/\t\t- Directory containing all trace event subsystems:\n"
3706 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3707 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003708 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3709 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003710 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003711 " events/<system>/<event>/\t- Directory containing control files for\n"
3712 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003713 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3714 " filter\t\t- If set, only events passing filter are traced\n"
3715 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003716 "\t Format: <trigger>[:count][if <filter>]\n"
3717 "\t trigger: traceon, traceoff\n"
3718 "\t enable_event:<system>:<event>\n"
3719 "\t disable_event:<system>:<event>\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003720#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003721 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003722#endif
3723#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003724 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003725#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003726 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3727 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3728 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3729 "\t events/block/block_unplug/trigger\n"
3730 "\t The first disables tracing every time block_unplug is hit.\n"
3731 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3732 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3733 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3734 "\t Like function triggers, the counter is only decremented if it\n"
3735 "\t enabled or disabled tracing.\n"
3736 "\t To remove a trigger without a count:\n"
3737 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3738 "\t To remove a trigger with a count:\n"
3739 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3740 "\t Filters can be ignored when removing a trigger.\n"
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003741;

static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
					readme_msg, strlen(readme_msg));
}

static const struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
	.llseek		= generic_file_llseek,
};

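/*
 * The saved_cmdlines file is exposed through the seq_file interface:
 * ->start() takes trace_cmdline_lock and positions the cursor, ->next()
 * skips unset slots, ->show() prints one "<pid> <comm>" pair, and
 * ->stop() drops the lock. A sketch of what a reader might see (the
 * pids and comms are illustrative only):
 *
 *	# cat saved_cmdlines
 *	29 kworker/3:1
 *	438 sshd
 *	512 bash
 */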
static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned int *ptr = v;

	if (*pos || m->count)
		ptr++;

	(*pos)++;

	for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
	     ptr++) {
		if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
			continue;

		return ptr;
	}

	return NULL;
}

static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
{
	void *v;
	loff_t l = 0;

	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	v = &savedcmd->map_cmdline_to_pid[0];
	while (l <= *pos) {
		v = saved_cmdlines_next(m, v, &l);
		if (!v)
			return NULL;
	}

	return v;
}

static void saved_cmdlines_stop(struct seq_file *m, void *v)
{
	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

static int saved_cmdlines_show(struct seq_file *m, void *v)
{
	char buf[TASK_COMM_LEN];
	unsigned int *pid = v;

	__trace_find_cmdline(*pid, buf);
	seq_printf(m, "%d %s\n", *pid, buf);
	return 0;
}

static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
	.start		= saved_cmdlines_start,
	.next		= saved_cmdlines_next,
	.stop		= saved_cmdlines_stop,
	.show		= saved_cmdlines_show,
};

static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
}

static const struct file_operations tracing_saved_cmdlines_fops = {
	.open		= tracing_saved_cmdlines_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static ssize_t
tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
				 size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	arch_spin_lock(&trace_cmdline_lock);
	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
	arch_spin_unlock(&trace_cmdline_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
{
	kfree(s->saved_cmdlines);
	kfree(s->map_cmdline_to_pid);
	kfree(s);
}

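/*
 * Resizing builds a completely new buffer, swaps the global savedcmd
 * pointer under trace_cmdline_lock and frees the old buffer only after
 * the lock is dropped. That keeps the lock hold time down to a single
 * pointer exchange; note that the old comm-pid mappings are not copied
 * over, they are simply discarded.
 */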
static int tracing_resize_saved_cmdlines(unsigned int val)
{
	struct saved_cmdlines_buffer *s, *savedcmd_temp;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	if (allocate_cmdlines_buffer(val, s) < 0) {
		kfree(s);
		return -ENOMEM;
	}

	arch_spin_lock(&trace_cmdline_lock);
	savedcmd_temp = savedcmd;
	savedcmd = s;
	arch_spin_unlock(&trace_cmdline_lock);
	free_saved_cmdlines_buffer(savedcmd_temp);

	return 0;
}

static ssize_t
tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
				  size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
	if (!val || val > PID_MAX_DEFAULT)
		return -EINVAL;

	ret = tracing_resize_saved_cmdlines((unsigned int)val);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations tracing_saved_cmdlines_size_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_saved_cmdlines_size_read,
	.write		= tracing_saved_cmdlines_size_write,
};

static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+2];
	int r;

	mutex_lock(&trace_types_lock);
	r = sprintf(buf, "%s\n", tr->current_trace->name);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

int tracer_init(struct tracer *t, struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
	return t->init(tr);
}

static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
{
	int cpu;

	for_each_tracing_cpu(cpu)
		per_cpu_ptr(buf->data, cpu)->entries = val;
}

#ifdef CONFIG_TRACER_MAX_TRACE
/* resize @trace_buf's entries to the size of @size_buf's entries */
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id)
{
	int cpu, ret = 0;

	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
			if (ret < 0)
				break;
			per_cpu_ptr(trace_buf->data, cpu)->entries =
				per_cpu_ptr(size_buf->data, cpu)->entries;
		}
	} else {
		ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
		if (ret == 0)
			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
				per_cpu_ptr(size_buf->data, cpu_id)->entries;
	}

	return ret;
}
#endif /* CONFIG_TRACER_MAX_TRACE */

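/*
 * Core resize path. Note the recovery logic below: if resizing the max
 * (snapshot) buffer fails, we attempt to shrink the main buffer back to
 * its previous per-cpu sizes so the two stay in sync; only if that also
 * fails is tracing disabled outright.
 */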
static int __tracing_resize_ring_buffer(struct trace_array *tr,
					unsigned long size, int cpu)
{
	int ret;

	/*
	 * If the kernel or a user changes the size of the ring buffer,
	 * we use the size that was given, and we can forget about
	 * expanding it later.
	 */
	ring_buffer_expanded = true;

	/* May be called before buffers are initialized */
	if (!tr->trace_buffer.buffer)
		return 0;

	ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
	if (ret < 0)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
	    !tr->current_trace->use_max_tr)
		goto out;

	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
	if (ret < 0) {
		int r = resize_buffer_duplicate_size(&tr->trace_buffer,
						     &tr->trace_buffer, cpu);
		if (r < 0) {
			/*
			 * AARGH! We are left with different sized max
			 * buffers!!!!
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the
			 * latency tracers), it swaps the max buffer
			 * with the saved snapshot. We succeeded in
			 * updating the size of the main buffer, but failed
			 * to update the size of the max buffer. But when
			 * we tried to reset the main buffer to the original
			 * size, we failed there too. This is very unlikely
			 * to happen, but if it does, warn and kill all
			 * tracing.
			 */
			WARN_ON(1);
			tracing_disabled = 1;
		}
		return ret;
	}

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->max_buffer, size);
	else
		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;

 out:
#endif /* CONFIG_TRACER_MAX_TRACE */

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->trace_buffer, size);
	else
		per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;

	return ret;
}

static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
					  unsigned long size, int cpu_id)
{
	int ret = size;

	mutex_lock(&trace_types_lock);

	if (cpu_id != RING_BUFFER_ALL_CPUS) {
		/* make sure this cpu is enabled in the mask */
		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
			ret = -EINVAL;
			goto out;
		}
	}

	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
	if (ret < 0)
		ret = -ENOMEM;

out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

/**
 * tracing_update_buffers - used by tracing facility to expand ring buffers
 *
 * To save memory when tracing is never used on a system with it
 * configured in, the ring buffers are set to a minimum size. But once
 * a user starts to use the tracing facility, then they need to grow
 * to their default size.
 *
 * This function is to be called when a tracer is about to be used.
 */
int tracing_update_buffers(void)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded)
		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
	mutex_unlock(&trace_types_lock);

	return ret;
}

struct trace_option_dentry;

static struct trace_option_dentry *
create_trace_option_files(struct trace_array *tr, struct tracer *tracer);

static void
destroy_trace_option_files(struct trace_option_dentry *topts);

/*
 * Used to clear out the tracer before deletion of an instance.
 * Must have trace_types_lock held.
 */
static void tracing_set_nop(struct trace_array *tr)
{
	if (tr->current_trace == &nop_trace)
		return;

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	tr->current_trace = &nop_trace;
}

static int tracing_set_tracer(struct trace_array *tr, const char *buf)
{
	static struct trace_option_dentry *topts;
	struct tracer *t;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool had_max_tr;
#endif
	int ret = 0;

	mutex_lock(&trace_types_lock);

	if (!ring_buffer_expanded) {
		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			goto out;
		ret = 0;
	}

	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (t == tr->current_trace)
		goto out;

	/* Some tracers are only allowed for the top level buffer */
	if (!trace_ok_for_array(t, tr)) {
		ret = -EINVAL;
		goto out;
	}

	trace_branch_disable();

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	/* Current trace needs to be nop_trace before synchronize_sched */
	tr->current_trace = &nop_trace;

#ifdef CONFIG_TRACER_MAX_TRACE
	had_max_tr = tr->allocated_snapshot;

	if (had_max_tr && !t->use_max_tr) {
		/*
		 * We need to make sure that the update_max_tr sees that
		 * current_trace changed to nop_trace to keep it from
		 * swapping the buffers after we resize it.
		 * update_max_tr() is called with interrupts disabled,
		 * so a synchronize_sched() is sufficient.
		 */
		synchronize_sched();
		free_snapshot(tr);
	}
#endif
	/* Currently, only the top instance has options */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
		destroy_trace_option_files(topts);
		topts = create_trace_option_files(tr, t);
	}

#ifdef CONFIG_TRACER_MAX_TRACE
	if (t->use_max_tr && !had_max_tr) {
		ret = alloc_snapshot(tr);
		if (ret < 0)
			goto out;
	}
#endif

	if (t->init) {
		ret = tracer_init(t, tr);
		if (ret)
			goto out;
	}

	tr->current_trace = t;
	tr->current_trace->enabled++;
	trace_branch_enable(tr);
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

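/*
 * Handler for writes to the current_tracer file, e.g. (assuming the
 * function tracer is configured in):
 *
 *	# echo function > current_tracer
 *
 * Trailing whitespace added by echo is stripped before the lookup.
 */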
static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+1];
	int i;
	size_t ret;
	int err;

	ret = cnt;

	if (cnt > MAX_TRACER_SIZE)
		cnt = MAX_TRACER_SIZE;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip ending whitespace. */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	err = tracing_set_tracer(tr, buf);
	if (err)
		return err;

	*ppos += ret;

	return ret;
}

static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n",
		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	*ptr = val * 1000;

	return cnt;
}

static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	mutex_lock(&trace_types_lock);

	/* create a buffer to store the information to pass to userspace */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		ret = -ENOMEM;
		__trace_array_put(tr);
		goto out;
	}

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace) {
		ret = -ENOMEM;
		goto fail;
	}
	*iter->trace = *tr->current_trace;

	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto fail;
	}

	/* trace pipe does not show start of buffer */
	cpumask_setall(iter->started);

	if (trace_flags & TRACE_ITER_LATENCY_FMT)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	iter->tr = tr;
	iter->trace_buffer = &tr->trace_buffer;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);
	filp->private_data = iter;

	if (iter->trace->pipe_open)
		iter->trace->pipe_open(iter);

	nonseekable_open(inode, filp);
out:
	mutex_unlock(&trace_types_lock);
	return ret;

fail:
	kfree(iter->trace);
	kfree(iter);
	__trace_array_put(tr);
	mutex_unlock(&trace_types_lock);
	return ret;
}

static int tracing_release_pipe(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter = file->private_data;
	struct trace_array *tr = inode->i_private;

	mutex_lock(&trace_types_lock);

	if (iter->trace->pipe_close)
		iter->trace->pipe_close(iter);

	mutex_unlock(&trace_types_lock);

	free_cpumask_var(iter->started);
	mutex_destroy(&iter->mutex);
	kfree(iter->trace);
	kfree(iter);

	trace_array_put(tr);

	return 0;
}

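/*
 * Poll support: a static iterator (the snapshot used by the "trace"
 * file) is either already filled or empty, so it is always reported
 * readable. Consuming readers either always report readable when in
 * blocking mode, or defer to the ring buffer's own poll wait.
 */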
static unsigned int
trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return POLLIN | POLLRDNORM;

	if (trace_flags & TRACE_ITER_BLOCK)
		/*
		 * Always select as readable when in blocking mode
		 */
		return POLLIN | POLLRDNORM;
	else
		return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
					     filp, poll_table);
}

static unsigned int
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
	struct trace_iterator *iter = filp->private_data;

	return trace_poll(iter, filp, poll_table);
}

/* Must be called with iter->mutex held. */
static int tracing_wait_pipe(struct file *filp)
{
	struct trace_iterator *iter = filp->private_data;
	int ret;

	while (trace_empty(iter)) {

		if ((filp->f_flags & O_NONBLOCK)) {
			return -EAGAIN;
		}

		/*
		 * We give an EOF only once we have read something and
		 * tracing has been disabled. We still block if tracing
		 * is disabled, but we have never read anything. This
		 * allows a user to cat this file, and then enable
		 * tracing. But after we have read something, we give
		 * an EOF when tracing is again disabled.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracing_is_on() && iter->pos)
			break;

		mutex_unlock(&iter->mutex);

		ret = wait_on_pipe(iter);

		mutex_lock(&iter->mutex);

		if (ret)
			return ret;

		if (signal_pending(current))
			return -EINTR;
	}

	return 1;
}

/*
 * Consumer reader.
 */
static ssize_t
tracing_read_pipe(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_iterator *iter = filp->private_data;
	struct trace_array *tr = iter->tr;
	ssize_t sret;

	/* return any leftover data */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (sret != -EBUSY)
		return sret;

	trace_seq_init(&iter->seq);

	/* copy the tracer to avoid using a global lock all around */
	mutex_lock(&trace_types_lock);
	if (unlikely(iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

	/*
	 * Avoid more than one consumer on a single file descriptor.
	 * This is just a matter of traces coherency: the ring buffer
	 * itself is protected.
	 */
	mutex_lock(&iter->mutex);
	if (iter->trace->read) {
		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
		if (sret)
			goto out;
	}

waitagain:
	sret = tracing_wait_pipe(filp);
	if (sret <= 0)
		goto out;

	/* stop when tracing is finished */
	if (trace_empty(iter)) {
		sret = 0;
		goto out;
	}

	if (cnt >= PAGE_SIZE)
		cnt = PAGE_SIZE - 1;

	/* reset all but tr, trace, and overruns */
	memset(&iter->seq, 0,
	       sizeof(struct trace_iterator) -
	       offsetof(struct trace_iterator, seq));
	cpumask_clear(iter->started);
	iter->pos = -1;

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);
	while (trace_find_next_entry_inc(iter) != NULL) {
		enum print_line_t ret;
		int len = iter->seq.len;

		ret = print_trace_line(iter);
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			/* don't print partial lines */
			iter->seq.len = len;
			break;
		}
		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);

		if (iter->seq.len >= cnt)
			break;

		/*
		 * Setting the full flag means we reached the trace_seq buffer
		 * size and we should leave by partial output condition above.
		 * One of the trace_seq_* functions is not used properly.
		 */
		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
			  iter->ent->type);
	}
	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();

	/* Now copy what we have to the user */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (iter->seq.readpos >= iter->seq.len)
		trace_seq_init(&iter->seq);

	/*
	 * If there was nothing to send to user, in spite of consuming trace
	 * entries, go back to wait for more entries.
	 */
	if (sret == -EBUSY)
		goto waitagain;

out:
	mutex_unlock(&iter->mutex);

	return sret;
}

static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
				     unsigned int idx)
{
	__free_page(spd->pages[idx]);
}

static const struct pipe_buf_operations tracing_pipe_buf_ops = {
	.can_merge		= 0,
	.confirm		= generic_pipe_buf_confirm,
	.release		= generic_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= generic_pipe_buf_get,
};

static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
	size_t count;
	int ret;

	/* Seq buffer is page-sized, exactly what we need. */
	for (;;) {
		count = iter->seq.len;
		ret = print_trace_line(iter);
		count = iter->seq.len - count;
		if (rem < count) {
			rem = 0;
			iter->seq.len -= count;
			break;
		}
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			iter->seq.len -= count;
			break;
		}

		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);
		rem -= count;
		if (!trace_find_next_entry_inc(iter)) {
			rem = 0;
			iter->ent = NULL;
			break;
		}
	}

	return rem;
}

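/*
 * splice() support for trace_pipe. The loop below allocates up to
 * spd.nr_pages_max pages, renders entries into iter->seq one page's
 * worth at a time via tracing_fill_pipe_page(), copies each page into
 * place, and finally hands the whole set to splice_to_pipe().
 */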
static ssize_t tracing_splice_read_pipe(struct file *filp,
					loff_t *ppos,
					struct pipe_inode_info *pipe,
					size_t len,
					unsigned int flags)
{
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct trace_iterator *iter = filp->private_data;
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages	= 0, /* This gets updated below. */
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &tracing_pipe_buf_ops,
		.spd_release	= tracing_spd_release_pipe,
	};
	struct trace_array *tr = iter->tr;
	ssize_t ret;
	size_t rem;
	unsigned int i;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	/* copy the tracer to avoid using a global lock all around */
	mutex_lock(&trace_types_lock);
	if (unlikely(iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

	mutex_lock(&iter->mutex);

	if (iter->trace->splice_read) {
		ret = iter->trace->splice_read(iter, filp,
					       ppos, pipe, len, flags);
		if (ret)
			goto out_err;
	}

	ret = tracing_wait_pipe(filp);
	if (ret <= 0)
		goto out_err;

	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
		ret = -EFAULT;
		goto out_err;
	}

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);

	/* Fill as many pages as possible. */
	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
		spd.pages[i] = alloc_page(GFP_KERNEL);
		if (!spd.pages[i])
			break;

		rem = tracing_fill_pipe_page(rem, iter);

		/* Copy the data into the page, so we can start over. */
		ret = trace_seq_to_buffer(&iter->seq,
					  page_address(spd.pages[i]),
					  iter->seq.len);
		if (ret < 0) {
			__free_page(spd.pages[i]);
			break;
		}
		spd.partial[i].offset = 0;
		spd.partial[i].len = iter->seq.len;

		trace_seq_init(&iter->seq);
	}

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
	mutex_unlock(&iter->mutex);

	spd.nr_pages = i;

	ret = splice_to_pipe(pipe, &spd);
out:
	splice_shrink_spd(&spd);
	return ret;

out_err:
	mutex_unlock(&iter->mutex);
	goto out;
}

static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	int cpu = tracing_get_cpu(inode);
	char buf[64];
	int r = 0;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

	if (cpu == RING_BUFFER_ALL_CPUS) {
		int cpu, buf_size_same;
		unsigned long size;

		size = 0;
		buf_size_same = 1;
		/* check if all cpu sizes are same */
		for_each_tracing_cpu(cpu) {
			/* fill in the size from first enabled cpu */
			if (size == 0)
				size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
			if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
				buf_size_same = 0;
				break;
			}
		}

		if (buf_size_same) {
			if (!ring_buffer_expanded)
				r = sprintf(buf, "%lu (expanded: %lu)\n",
					    size >> 10,
					    trace_buf_size >> 10);
			else
				r = sprintf(buf, "%lu\n", size >> 10);
		} else
			r = sprintf(buf, "X\n");
	} else
		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);

	mutex_unlock(&trace_types_lock);

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
	return ret;
}

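/*
 * Writes to buffer_size_kb resize the per-cpu ring buffers. A sketch of
 * the usage (sizes in KB, paths relative to the tracing directory):
 *
 *	# echo 4096 > buffer_size_kb              all cpu buffers
 *	# echo 4096 > per_cpu/cpu1/buffer_size_kb only cpu1's buffer
 */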
static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry */
	if (!val)
		return -EINVAL;

	/* value is in KB */
	val <<= 10;
	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static ssize_t
tracing_total_entries_read(struct file *filp, char __user *ubuf,
			   size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r, cpu;
	unsigned long size = 0, expanded_size = 0;

	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
		size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
		if (!ring_buffer_expanded)
			expanded_size += trace_buf_size >> 10;
	}
	if (ring_buffer_expanded)
		r = sprintf(buf, "%lu\n", size);
	else
		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	/*
	 * There is no need to read what the user has written; this
	 * function is just here to make sure that there is no error
	 * when "echo" is used.
	 */

	*ppos += cnt;

	return cnt;
}

static int
tracing_free_buffer_release(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	/* disable tracing ? */
	if (trace_flags & TRACE_ITER_STOP_ON_FREE)
		tracer_tracing_off(tr);
	/* resize the ring buffer to 0 */
	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);

	trace_array_put(tr);

	return 0;
}

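/*
 * Handler for writes to the trace_marker file, which lets userspace
 * annotate the trace, e.g.:
 *
 *	# echo hello_world > trace_marker
 *
 * The payload shows up in the trace as a print entry. Writes longer
 * than TRACE_BUF_SIZE are silently truncated.
 */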
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004788static ssize_t
4789tracing_mark_write(struct file *filp, const char __user *ubuf,
4790 size_t cnt, loff_t *fpos)
4791{
Steven Rostedtd696b582011-09-22 11:50:27 -04004792 unsigned long addr = (unsigned long)ubuf;
Alexander Z Lam2d716192013-07-01 15:31:24 -07004793 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04004794 struct ring_buffer_event *event;
4795 struct ring_buffer *buffer;
4796 struct print_entry *entry;
4797 unsigned long irq_flags;
4798 struct page *pages[2];
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004799 void *map_page[2];
Steven Rostedtd696b582011-09-22 11:50:27 -04004800 int nr_pages = 1;
4801 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04004802 int offset;
4803 int size;
4804 int len;
4805 int ret;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004806 int i;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004807
Steven Rostedtc76f0692008-11-07 22:36:02 -05004808 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004809 return -EINVAL;
4810
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07004811 if (!(trace_flags & TRACE_ITER_MARKERS))
4812 return -EINVAL;
4813
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004814 if (cnt > TRACE_BUF_SIZE)
4815 cnt = TRACE_BUF_SIZE;
4816
Steven Rostedtd696b582011-09-22 11:50:27 -04004817 /*
4818 * Userspace is injecting traces into the kernel trace buffer.
4819	 * We want to be as non-intrusive as possible.
4820	 * To do so, we do not want to allocate any special buffers
4821	 * or take any locks, but instead write the userspace data
4822	 * straight into the ring buffer.
4823	 *
4824	 * First we need to pin the userspace buffer into memory. It is
4825	 * most likely already resident, because userspace just
4826	 * referenced it, but there is no guarantee of that. Using
4827	 * get_user_pages_fast() and kmap_atomic()/kunmap_atomic() we
4828	 * get direct access to the pages and can write the data
4829	 * straight into the ring buffer.
4830 */
4831 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004832
Steven Rostedtd696b582011-09-22 11:50:27 -04004833 /* check if we cross pages */
4834 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
4835 nr_pages = 2;
4836
4837 offset = addr & (PAGE_SIZE - 1);
4838 addr &= PAGE_MASK;
4839
4840 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
4841 if (ret < nr_pages) {
4842 while (--ret >= 0)
4843 put_page(pages[ret]);
4844 written = -EFAULT;
4845 goto out;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004846 }
4847
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004848 for (i = 0; i < nr_pages; i++)
4849 map_page[i] = kmap_atomic(pages[i]);
Steven Rostedtd696b582011-09-22 11:50:27 -04004850
4851 local_save_flags(irq_flags);
4852 size = sizeof(*entry) + cnt + 2; /* possible \n added */
Alexander Z Lam2d716192013-07-01 15:31:24 -07004853 buffer = tr->trace_buffer.buffer;
Steven Rostedtd696b582011-09-22 11:50:27 -04004854 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
4855 irq_flags, preempt_count());
4856 if (!event) {
4857 /* Ring buffer disabled, return as if not open for write */
4858 written = -EBADF;
4859 goto out_unlock;
4860 }
4861
4862 entry = ring_buffer_event_data(event);
4863 entry->ip = _THIS_IP_;
4864
4865 if (nr_pages == 2) {
4866 len = PAGE_SIZE - offset;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004867 memcpy(&entry->buf, map_page[0] + offset, len);
4868 memcpy(&entry->buf[len], map_page[1], cnt - len);
Steven Rostedtd696b582011-09-22 11:50:27 -04004869 } else
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004870 memcpy(&entry->buf, map_page[0] + offset, cnt);
Steven Rostedtd696b582011-09-22 11:50:27 -04004871
4872 if (entry->buf[cnt - 1] != '\n') {
4873 entry->buf[cnt] = '\n';
4874 entry->buf[cnt + 1] = '\0';
4875 } else
4876 entry->buf[cnt] = '\0';
4877
Steven Rostedt7ffbd482012-10-11 12:14:25 -04004878 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04004879
4880 written = cnt;
4881
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02004882 *fpos += written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004883
Steven Rostedtd696b582011-09-22 11:50:27 -04004884 out_unlock:
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004885	for (i = 0; i < nr_pages; i++) {
4886 kunmap_atomic(map_page[i]);
4887 put_page(pages[i]);
4888 }
Steven Rostedtd696b582011-09-22 11:50:27 -04004889 out:
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02004890 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004891}
4892
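A minimal user-space sketch of driving tracing_mark_write() above through the trace_marker file. The mount point /sys/kernel/debug is an assumption about the local setup; each write() becomes one TRACE_PRINT entry, and the handler appends the trailing newline if the payload lacks one.

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
    	const char *msg = "hello from user space";
    	/* Assumed mount point; adjust if debugfs lives elsewhere. */
    	int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);

    	if (fd < 0) {
    		perror("open trace_marker");
    		return 1;
    	}
    	/* One write() == one marker entry in the ring buffer. */
    	if (write(fd, msg, strlen(msg)) < 0)
    		perror("write trace_marker");
    	close(fd);
    	return 0;
    }
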
Li Zefan13f16d22009-12-08 11:16:11 +08004893static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08004894{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004895 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08004896 int i;
4897
4898 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08004899 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08004900 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004901 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
4902 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08004903 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08004904
Li Zefan13f16d22009-12-08 11:16:11 +08004905 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08004906}
4907
Steven Rostedte1e232c2014-02-10 23:38:46 -05004908static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
Zhaolei5079f322009-08-25 16:12:56 +08004909{
Zhaolei5079f322009-08-25 16:12:56 +08004910 int i;
4911
Zhaolei5079f322009-08-25 16:12:56 +08004912 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
4913 if (strcmp(trace_clocks[i].name, clockstr) == 0)
4914 break;
4915 }
4916 if (i == ARRAY_SIZE(trace_clocks))
4917 return -EINVAL;
4918
Zhaolei5079f322009-08-25 16:12:56 +08004919 mutex_lock(&trace_types_lock);
4920
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004921 tr->clock_id = i;
4922
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004923 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08004924
David Sharp60303ed2012-10-11 16:27:52 -07004925 /*
4926 * New clock may not be consistent with the previous clock.
4927 * Reset the buffer so that it doesn't have incomparable timestamps.
4928 */
Alexander Z Lam94571582013-08-02 18:36:16 -07004929 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004930
4931#ifdef CONFIG_TRACER_MAX_TRACE
4932 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
4933 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
Alexander Z Lam94571582013-08-02 18:36:16 -07004934 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004935#endif
David Sharp60303ed2012-10-11 16:27:52 -07004936
Zhaolei5079f322009-08-25 16:12:56 +08004937 mutex_unlock(&trace_types_lock);
4938
Steven Rostedte1e232c2014-02-10 23:38:46 -05004939 return 0;
4940}
4941
4942static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
4943 size_t cnt, loff_t *fpos)
4944{
4945 struct seq_file *m = filp->private_data;
4946 struct trace_array *tr = m->private;
4947 char buf[64];
4948 const char *clockstr;
4949 int ret;
4950
4951 if (cnt >= sizeof(buf))
4952 return -EINVAL;
4953
4954 if (copy_from_user(&buf, ubuf, cnt))
4955 return -EFAULT;
4956
4957 buf[cnt] = 0;
4958
4959 clockstr = strstrip(buf);
4960
4961 ret = tracing_set_clock(tr, clockstr);
4962 if (ret)
4963 return ret;
4964
Zhaolei5079f322009-08-25 16:12:56 +08004965 *fpos += cnt;
4966
4967 return cnt;
4968}
4969
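A sketch of the user-visible contract implemented by tracing_clock_show() and tracing_clock_write() above, with the debugfs path assumed as before: reading trace_clock lists every registered clock with the active one in square brackets, and writing a clock name switches it, which also resets the buffers as the comment in tracing_set_clock() notes.

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
    	char buf[256];
    	int fd = open("/sys/kernel/debug/tracing/trace_clock", O_RDWR);
    	ssize_t n;

    	if (fd < 0)
    		return 1;
    	n = read(fd, buf, sizeof(buf) - 1);	/* e.g. "[local] global counter ..." */
    	if (n > 0) {
    		buf[n] = '\0';
    		printf("%s", buf);
    	}
    	/* Selecting a new clock resets the ring buffers. */
    	if (write(fd, "global", 6) != 6)
    		perror("write trace_clock");
    	close(fd);
    	return 0;
    }
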
Li Zefan13f16d22009-12-08 11:16:11 +08004970static int tracing_clock_open(struct inode *inode, struct file *file)
4971{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004972 struct trace_array *tr = inode->i_private;
4973 int ret;
4974
Li Zefan13f16d22009-12-08 11:16:11 +08004975 if (tracing_disabled)
4976 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004977
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004978 if (trace_array_get(tr))
4979 return -ENODEV;
4980
4981 ret = single_open(file, tracing_clock_show, inode->i_private);
4982 if (ret < 0)
4983 trace_array_put(tr);
4984
4985 return ret;
Li Zefan13f16d22009-12-08 11:16:11 +08004986}
4987
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05004988struct ftrace_buffer_info {
4989 struct trace_iterator iter;
4990 void *spare;
4991 unsigned int read;
4992};
4993
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004994#ifdef CONFIG_TRACER_SNAPSHOT
4995static int tracing_snapshot_open(struct inode *inode, struct file *file)
4996{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004997 struct trace_array *tr = inode->i_private;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004998 struct trace_iterator *iter;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004999 struct seq_file *m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005000 int ret = 0;
5001
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005002 if (trace_array_get(tr) < 0)
5003 return -ENODEV;
5004
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005005 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02005006 iter = __tracing_open(inode, file, true);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005007 if (IS_ERR(iter))
5008 ret = PTR_ERR(iter);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005009 } else {
5010 /* Writes still need the seq_file to hold the private data */
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005011 ret = -ENOMEM;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005012 m = kzalloc(sizeof(*m), GFP_KERNEL);
5013 if (!m)
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005014 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005015 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5016 if (!iter) {
5017 kfree(m);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005018 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005019 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005020 ret = 0;
5021
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005022 iter->tr = tr;
Oleg Nesterov6484c712013-07-23 17:26:10 +02005023 iter->trace_buffer = &tr->max_buffer;
5024 iter->cpu_file = tracing_get_cpu(inode);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005025 m->private = iter;
5026 file->private_data = m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005027 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005028out:
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005029 if (ret < 0)
5030 trace_array_put(tr);
5031
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005032 return ret;
5033}
5034
5035static ssize_t
5036tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5037 loff_t *ppos)
5038{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005039 struct seq_file *m = filp->private_data;
5040 struct trace_iterator *iter = m->private;
5041 struct trace_array *tr = iter->tr;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005042 unsigned long val;
5043 int ret;
5044
5045 ret = tracing_update_buffers();
5046 if (ret < 0)
5047 return ret;
5048
5049 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5050 if (ret)
5051 return ret;
5052
5053 mutex_lock(&trace_types_lock);
5054
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005055 if (tr->current_trace->use_max_tr) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005056 ret = -EBUSY;
5057 goto out;
5058 }
5059
5060 switch (val) {
5061 case 0:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005062 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5063 ret = -EINVAL;
5064 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005065 }
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005066 if (tr->allocated_snapshot)
5067 free_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005068 break;
5069 case 1:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005070/* Only allow per-cpu swap if the ring buffer supports it */
5071#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5072 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5073 ret = -EINVAL;
5074 break;
5075 }
5076#endif
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05005077 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005078 ret = alloc_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005079 if (ret < 0)
5080 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005081 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005082 local_irq_disable();
5083 /* Now, we're going to swap */
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005084 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05005085 update_max_tr(tr, current, smp_processor_id());
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005086 else
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05005087 update_max_tr_single(tr, current, iter->cpu_file);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005088 local_irq_enable();
5089 break;
5090 default:
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05005091 if (tr->allocated_snapshot) {
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005092 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5093 tracing_reset_online_cpus(&tr->max_buffer);
5094 else
5095 tracing_reset(&tr->max_buffer, iter->cpu_file);
5096 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005097 break;
5098 }
5099
5100 if (ret >= 0) {
5101 *ppos += cnt;
5102 ret = cnt;
5103 }
5104out:
5105 mutex_unlock(&trace_types_lock);
5106 return ret;
5107}
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005108
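The switch statement above defines the snapshot file's write protocol: "0" frees the spare buffer, "1" allocates it if needed and swaps it with the live buffer, and any other value clears the snapshot contents. A hedged user-space sketch, debugfs path assumed:

    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
    	/* Assumed mount point; requires CONFIG_TRACER_SNAPSHOT. */
    	int fd = open("/sys/kernel/debug/tracing/snapshot", O_WRONLY);

    	if (fd < 0)
    		return 1;
    	write(fd, "1", 1);	/* take a snapshot of the live buffer now */
    	/* ... the frozen data can be read back from the same file ... */
    	write(fd, "0", 1);	/* free the spare buffer again */
    	close(fd);
    	return 0;
    }
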
5109static int tracing_snapshot_release(struct inode *inode, struct file *file)
5110{
5111 struct seq_file *m = file->private_data;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005112 int ret;
5113
5114 ret = tracing_release(inode, file);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005115
5116 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005117 return ret;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005118
5119 /* If write only, the seq_file is just a stub */
5120 if (m)
5121 kfree(m->private);
5122 kfree(m);
5123
5124 return 0;
5125}
5126
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005127static int tracing_buffers_open(struct inode *inode, struct file *filp);
5128static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5129 size_t count, loff_t *ppos);
5130static int tracing_buffers_release(struct inode *inode, struct file *file);
5131static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5132 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5133
5134static int snapshot_raw_open(struct inode *inode, struct file *filp)
5135{
5136 struct ftrace_buffer_info *info;
5137 int ret;
5138
5139 ret = tracing_buffers_open(inode, filp);
5140 if (ret < 0)
5141 return ret;
5142
5143 info = filp->private_data;
5144
5145 if (info->iter.trace->use_max_tr) {
5146 tracing_buffers_release(inode, filp);
5147 return -EBUSY;
5148 }
5149
5150 info->iter.snapshot = true;
5151 info->iter.trace_buffer = &info->iter.tr->max_buffer;
5152
5153 return ret;
5154}
5155
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005156#endif /* CONFIG_TRACER_SNAPSHOT */
5157
5158
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005159static const struct file_operations tracing_max_lat_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005160 .open = tracing_open_generic,
5161 .read = tracing_max_lat_read,
5162 .write = tracing_max_lat_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005163 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005164};
5165
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005166static const struct file_operations set_tracer_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005167 .open = tracing_open_generic,
5168 .read = tracing_set_trace_read,
5169 .write = tracing_set_trace_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005170 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005171};
5172
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005173static const struct file_operations tracing_pipe_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005174 .open = tracing_open_pipe,
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005175 .poll = tracing_poll_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005176 .read = tracing_read_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005177 .splice_read = tracing_splice_read_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005178 .release = tracing_release_pipe,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005179 .llseek = no_llseek,
Steven Rostedtb3806b42008-05-12 21:20:46 +02005180};
5181
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005182static const struct file_operations tracing_entries_fops = {
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005183 .open = tracing_open_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02005184 .read = tracing_entries_read,
5185 .write = tracing_entries_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005186 .llseek = generic_file_llseek,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005187 .release = tracing_release_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02005188};
5189
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005190static const struct file_operations tracing_total_entries_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005191 .open = tracing_open_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005192 .read = tracing_total_entries_read,
5193 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005194 .release = tracing_release_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005195};
5196
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005197static const struct file_operations tracing_free_buffer_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005198 .open = tracing_open_generic_tr,
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005199 .write = tracing_free_buffer_write,
5200 .release = tracing_free_buffer_release,
5201};
5202
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005203static const struct file_operations tracing_mark_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005204 .open = tracing_open_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005205 .write = tracing_mark_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005206 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005207 .release = tracing_release_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005208};
5209
Zhaolei5079f322009-08-25 16:12:56 +08005210static const struct file_operations trace_clock_fops = {
Li Zefan13f16d22009-12-08 11:16:11 +08005211 .open = tracing_clock_open,
5212 .read = seq_read,
5213 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005214 .release = tracing_single_release_tr,
Zhaolei5079f322009-08-25 16:12:56 +08005215 .write = tracing_clock_write,
5216};
5217
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005218#ifdef CONFIG_TRACER_SNAPSHOT
5219static const struct file_operations snapshot_fops = {
5220 .open = tracing_snapshot_open,
5221 .read = seq_read,
5222 .write = tracing_snapshot_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05005223 .llseek = tracing_lseek,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005224 .release = tracing_snapshot_release,
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005225};
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005226
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005227static const struct file_operations snapshot_raw_fops = {
5228 .open = snapshot_raw_open,
5229 .read = tracing_buffers_read,
5230 .release = tracing_buffers_release,
5231 .splice_read = tracing_buffers_splice_read,
5232 .llseek = no_llseek,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005233};
5234
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005235#endif /* CONFIG_TRACER_SNAPSHOT */
5236
Steven Rostedt2cadf912008-12-01 22:20:19 -05005237static int tracing_buffers_open(struct inode *inode, struct file *filp)
5238{
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005239 struct trace_array *tr = inode->i_private;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005240 struct ftrace_buffer_info *info;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005241 int ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005242
5243 if (tracing_disabled)
5244 return -ENODEV;
5245
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005246 if (trace_array_get(tr) < 0)
5247 return -ENODEV;
5248
Steven Rostedt2cadf912008-12-01 22:20:19 -05005249 info = kzalloc(sizeof(*info), GFP_KERNEL);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005250 if (!info) {
5251 trace_array_put(tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005252 return -ENOMEM;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005253 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05005254
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005255 mutex_lock(&trace_types_lock);
5256
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005257 info->iter.tr = tr;
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005258 info->iter.cpu_file = tracing_get_cpu(inode);
Steven Rostedtb6273442013-02-28 13:44:11 -05005259 info->iter.trace = tr->current_trace;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005260 info->iter.trace_buffer = &tr->trace_buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005261 info->spare = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005262	/* Force the first read() to fetch from the ring buffer */
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005263 info->read = (unsigned int)-1;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005264
5265 filp->private_data = info;
5266
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005267 mutex_unlock(&trace_types_lock);
5268
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005269 ret = nonseekable_open(inode, filp);
5270 if (ret < 0)
5271 trace_array_put(tr);
5272
5273 return ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005274}
5275
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005276static unsigned int
5277tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5278{
5279 struct ftrace_buffer_info *info = filp->private_data;
5280 struct trace_iterator *iter = &info->iter;
5281
5282 return trace_poll(iter, filp, poll_table);
5283}
5284
Steven Rostedt2cadf912008-12-01 22:20:19 -05005285static ssize_t
5286tracing_buffers_read(struct file *filp, char __user *ubuf,
5287 size_t count, loff_t *ppos)
5288{
5289 struct ftrace_buffer_info *info = filp->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005290 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005291 ssize_t ret;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005292 ssize_t size;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005293
Steven Rostedt2dc5d122009-03-04 19:10:05 -05005294 if (!count)
5295 return 0;
5296
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005297 mutex_lock(&trace_types_lock);
5298
5299#ifdef CONFIG_TRACER_MAX_TRACE
5300 if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5301 size = -EBUSY;
5302 goto out_unlock;
5303 }
5304#endif
5305
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005306 if (!info->spare)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005307 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5308 iter->cpu_file);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005309 size = -ENOMEM;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005310 if (!info->spare)
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005311 goto out_unlock;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005312
Steven Rostedt2cadf912008-12-01 22:20:19 -05005313 /* Do we have previous read data to read? */
5314 if (info->read < PAGE_SIZE)
5315 goto read;
5316
Steven Rostedtb6273442013-02-28 13:44:11 -05005317 again:
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005318 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005319 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005320 &info->spare,
5321 count,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005322 iter->cpu_file, 0);
5323 trace_access_unlock(iter->cpu_file);
Steven Rostedtb6273442013-02-28 13:44:11 -05005324
5325 if (ret < 0) {
5326 if (trace_empty(iter)) {
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005327 if ((filp->f_flags & O_NONBLOCK)) {
5328 size = -EAGAIN;
5329 goto out_unlock;
5330 }
5331 mutex_unlock(&trace_types_lock);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005332 ret = wait_on_pipe(iter);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005333 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005334 if (ret) {
5335 size = ret;
5336 goto out_unlock;
5337 }
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005338 if (signal_pending(current)) {
5339 size = -EINTR;
5340 goto out_unlock;
5341 }
Steven Rostedtb6273442013-02-28 13:44:11 -05005342 goto again;
5343 }
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005344 size = 0;
5345 goto out_unlock;
Steven Rostedtb6273442013-02-28 13:44:11 -05005346 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05005347
Steven Rostedt436fc282011-10-14 10:44:25 -04005348 info->read = 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05005349 read:
Steven Rostedt2cadf912008-12-01 22:20:19 -05005350 size = PAGE_SIZE - info->read;
5351 if (size > count)
5352 size = count;
5353
5354 ret = copy_to_user(ubuf, info->spare + info->read, size);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005355 if (ret == size) {
5356 size = -EFAULT;
5357 goto out_unlock;
5358 }
Steven Rostedt2dc5d122009-03-04 19:10:05 -05005359 size -= ret;
5360
Steven Rostedt2cadf912008-12-01 22:20:19 -05005361 *ppos += size;
5362 info->read += size;
5363
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005364 out_unlock:
5365 mutex_unlock(&trace_types_lock);
5366
Steven Rostedt2cadf912008-12-01 22:20:19 -05005367 return size;
5368}
5369
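tracing_buffers_read() above hands data back in whole ring-buffer pages via ring_buffer_read_page(), so a consumer of trace_pipe_raw should issue page-sized reads. A sketch under the same path assumption, one file per CPU; O_NONBLOCK makes the loop end once the buffer drains:

    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int main(void)
    {
    	long psz = sysconf(_SC_PAGESIZE);
    	char *page = malloc(psz);
    	int fd = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw",
    		      O_RDONLY | O_NONBLOCK);
    	ssize_t n;

    	if (fd < 0 || !page)
    		return 1;
    	/* Each successful read returns one ring-buffer page image. */
    	while ((n = read(fd, page, psz)) > 0)
    		fwrite(page, 1, n, stdout);
    	close(fd);
    	free(page);
    	return 0;
    }
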
5370static int tracing_buffers_release(struct inode *inode, struct file *file)
5371{
5372 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005373 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005374
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005375 mutex_lock(&trace_types_lock);
5376
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005377 __trace_array_put(iter->tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005378
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005379 if (info->spare)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005380 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005381 kfree(info);
5382
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005383 mutex_unlock(&trace_types_lock);
5384
Steven Rostedt2cadf912008-12-01 22:20:19 -05005385 return 0;
5386}
5387
5388struct buffer_ref {
5389 struct ring_buffer *buffer;
5390 void *page;
5391 int ref;
5392};
5393
5394static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5395 struct pipe_buffer *buf)
5396{
5397 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5398
5399 if (--ref->ref)
5400 return;
5401
5402 ring_buffer_free_read_page(ref->buffer, ref->page);
5403 kfree(ref);
5404 buf->private = 0;
5405}
5406
Steven Rostedt2cadf912008-12-01 22:20:19 -05005407static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5408 struct pipe_buffer *buf)
5409{
5410 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5411
5412 ref->ref++;
5413}
5414
5415/* Pipe buffer operations for a buffer. */
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08005416static const struct pipe_buf_operations buffer_pipe_buf_ops = {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005417 .can_merge = 0,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005418 .confirm = generic_pipe_buf_confirm,
5419 .release = buffer_pipe_buf_release,
Masami Hiramatsud55cb6c2012-08-09 21:31:10 +09005420 .steal = generic_pipe_buf_steal,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005421 .get = buffer_pipe_buf_get,
5422};
5423
5424/*
5425 * Callback from splice_to_pipe(): release any pages left in the spd
5426 * if we errored out while filling the pipe.
5427 */
5428static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5429{
5430 struct buffer_ref *ref =
5431 (struct buffer_ref *)spd->partial[i].private;
5432
5433 if (--ref->ref)
5434 return;
5435
5436 ring_buffer_free_read_page(ref->buffer, ref->page);
5437 kfree(ref);
5438 spd->partial[i].private = 0;
5439}
5440
5441static ssize_t
5442tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5443 struct pipe_inode_info *pipe, size_t len,
5444 unsigned int flags)
5445{
5446 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005447 struct trace_iterator *iter = &info->iter;
Jens Axboe35f3d142010-05-20 10:43:18 +02005448 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5449 struct page *pages_def[PIPE_DEF_BUFFERS];
Steven Rostedt2cadf912008-12-01 22:20:19 -05005450 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02005451 .pages = pages_def,
5452 .partial = partial_def,
Eric Dumazet047fe362012-06-12 15:24:40 +02005453 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005454 .flags = flags,
5455 .ops = &buffer_pipe_buf_ops,
5456 .spd_release = buffer_spd_release,
5457 };
5458 struct buffer_ref *ref;
Steven Rostedt93459c62009-04-29 00:23:13 -04005459 int entries, size, i;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005460 ssize_t ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005461
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005462 mutex_lock(&trace_types_lock);
5463
5464#ifdef CONFIG_TRACER_MAX_TRACE
5465 if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5466 ret = -EBUSY;
5467 goto out;
5468 }
5469#endif
5470
5471 if (splice_grow_spd(pipe, &spd)) {
5472 ret = -ENOMEM;
5473 goto out;
5474 }
Jens Axboe35f3d142010-05-20 10:43:18 +02005475
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005476 if (*ppos & (PAGE_SIZE - 1)) {
Jens Axboe35f3d142010-05-20 10:43:18 +02005477 ret = -EINVAL;
5478 goto out;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005479 }
5480
5481 if (len & (PAGE_SIZE - 1)) {
Jens Axboe35f3d142010-05-20 10:43:18 +02005482 if (len < PAGE_SIZE) {
5483 ret = -EINVAL;
5484 goto out;
5485 }
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005486 len &= PAGE_MASK;
5487 }
5488
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005489 again:
5490 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005491 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt93459c62009-04-29 00:23:13 -04005492
Al Viroa786c062014-04-11 12:01:03 -04005493 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005494 struct page *page;
5495 int r;
5496
5497 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
5498 if (!ref)
5499 break;
5500
Steven Rostedt7267fa62009-04-29 00:16:21 -04005501 ref->ref = 1;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005502 ref->buffer = iter->trace_buffer->buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005503 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005504 if (!ref->page) {
5505 kfree(ref);
5506 break;
5507 }
5508
5509 r = ring_buffer_read_page(ref->buffer, &ref->page,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005510 len, iter->cpu_file, 1);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005511 if (r < 0) {
Vaibhav Nagarnaik7ea59062011-05-03 17:56:42 -07005512 ring_buffer_free_read_page(ref->buffer, ref->page);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005513 kfree(ref);
5514 break;
5515 }
5516
5517 /*
5518		 * Zero out any leftover data; this page is
5519		 * going out to user land.
5520 */
5521 size = ring_buffer_page_len(ref->page);
5522 if (size < PAGE_SIZE)
5523 memset(ref->page + size, 0, PAGE_SIZE - size);
5524
5525 page = virt_to_page(ref->page);
5526
5527 spd.pages[i] = page;
5528 spd.partial[i].len = PAGE_SIZE;
5529 spd.partial[i].offset = 0;
5530 spd.partial[i].private = (unsigned long)ref;
5531 spd.nr_pages++;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005532 *ppos += PAGE_SIZE;
Steven Rostedt93459c62009-04-29 00:23:13 -04005533
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005534 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005535 }
5536
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005537 trace_access_unlock(iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005538 spd.nr_pages = i;
5539
5540 /* did we read anything? */
5541 if (!spd.nr_pages) {
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005542 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005543 ret = -EAGAIN;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005544 goto out;
5545 }
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005546 mutex_unlock(&trace_types_lock);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005547 ret = wait_on_pipe(iter);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005548 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005549 if (ret)
5550 goto out;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005551 if (signal_pending(current)) {
5552 ret = -EINTR;
5553 goto out;
5554 }
5555 goto again;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005556 }
5557
5558 ret = splice_to_pipe(pipe, &spd);
Eric Dumazet047fe362012-06-12 15:24:40 +02005559 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02005560out:
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005561 mutex_unlock(&trace_types_lock);
5562
Steven Rostedt2cadf912008-12-01 22:20:19 -05005563 return ret;
5564}
5565
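tracing_buffers_splice_read() above moves ring-buffer pages into a pipe by reference (struct buffer_ref) instead of copying them. A user-space sketch of that zero-copy path, draining one CPU's buffer into a file; paths assumed as before, and the splice length assumes 4 KB pages since the handler insists on page-multiple lengths:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
    	int pfd[2];
    	int in = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw",
    		      O_RDONLY | O_NONBLOCK);
    	int out = open("cpu0.raw", O_WRONLY | O_CREAT | O_TRUNC, 0644);
    	ssize_t n;

    	if (in < 0 || out < 0 || pipe(pfd) < 0)
    		return 1;
    	/* Whole pages move by reference: ring buffer -> pipe -> file. */
    	while ((n = splice(in, NULL, pfd[1], NULL, 16 * 4096,
    			   SPLICE_F_MOVE)) > 0)
    		splice(pfd[0], NULL, out, NULL, n, SPLICE_F_MOVE);
    	return 0;
    }
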
5566static const struct file_operations tracing_buffers_fops = {
5567 .open = tracing_buffers_open,
5568 .read = tracing_buffers_read,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005569 .poll = tracing_buffers_poll,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005570 .release = tracing_buffers_release,
5571 .splice_read = tracing_buffers_splice_read,
5572 .llseek = no_llseek,
5573};
5574
Steven Rostedtc8d77182009-04-29 18:03:45 -04005575static ssize_t
5576tracing_stats_read(struct file *filp, char __user *ubuf,
5577 size_t count, loff_t *ppos)
5578{
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005579 struct inode *inode = file_inode(filp);
5580 struct trace_array *tr = inode->i_private;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005581 struct trace_buffer *trace_buf = &tr->trace_buffer;
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005582 int cpu = tracing_get_cpu(inode);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005583 struct trace_seq *s;
5584 unsigned long cnt;
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005585 unsigned long long t;
5586 unsigned long usec_rem;
Steven Rostedtc8d77182009-04-29 18:03:45 -04005587
Li Zefane4f2d102009-06-15 10:57:28 +08005588 s = kmalloc(sizeof(*s), GFP_KERNEL);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005589 if (!s)
Roel Kluina6463652009-11-11 22:26:35 +01005590 return -ENOMEM;
Steven Rostedtc8d77182009-04-29 18:03:45 -04005591
5592 trace_seq_init(s);
5593
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005594 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005595 trace_seq_printf(s, "entries: %ld\n", cnt);
5596
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005597 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005598 trace_seq_printf(s, "overrun: %ld\n", cnt);
5599
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005600 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005601 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5602
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005603 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005604 trace_seq_printf(s, "bytes: %ld\n", cnt);
5605
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09005606 if (trace_clocks[tr->clock_id].in_ns) {
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005607 /* local or global for trace_clock */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005608 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005609 usec_rem = do_div(t, USEC_PER_SEC);
5610 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5611 t, usec_rem);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005612
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005613 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005614 usec_rem = do_div(t, USEC_PER_SEC);
5615 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5616 } else {
5617 /* counter or tsc mode for trace_clock */
5618 trace_seq_printf(s, "oldest event ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005619 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005620
5621 trace_seq_printf(s, "now ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005622 ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005623 }
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005624
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005625 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
Slava Pestov884bfe82011-07-15 14:23:58 -07005626 trace_seq_printf(s, "dropped events: %ld\n", cnt);
5627
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005628 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
Steven Rostedt (Red Hat)ad964702013-01-29 17:45:49 -05005629 trace_seq_printf(s, "read events: %ld\n", cnt);
5630
Steven Rostedtc8d77182009-04-29 18:03:45 -04005631 count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
5632
5633 kfree(s);
5634
5635 return count;
5636}
5637
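Given the trace_seq_printf() calls above, a per-CPU stats file renders roughly as follows when a nanosecond clock is active (all values purely illustrative):

    entries: 1024
    overrun: 0
    commit overrun: 0
    bytes: 57344
    oldest event ts:  5264.183217
    now ts:  5271.064981
    dropped events: 0
    read events: 1024
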
5638static const struct file_operations tracing_stats_fops = {
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005639 .open = tracing_open_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04005640 .read = tracing_stats_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005641 .llseek = generic_file_llseek,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005642 .release = tracing_release_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04005643};
5644
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005645#ifdef CONFIG_DYNAMIC_FTRACE
5646
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005647int __weak ftrace_arch_read_dyn_info(char *buf, int size)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005648{
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005649 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005650}
5651
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005652static ssize_t
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005653tracing_read_dyn_info(struct file *filp, char __user *ubuf,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005654 size_t cnt, loff_t *ppos)
5655{
Steven Rostedta26a2a22008-10-31 00:03:22 -04005656 static char ftrace_dyn_info_buffer[1024];
5657 static DEFINE_MUTEX(dyn_info_mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005658 unsigned long *p = filp->private_data;
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005659 char *buf = ftrace_dyn_info_buffer;
Steven Rostedta26a2a22008-10-31 00:03:22 -04005660 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005661 int r;
5662
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005663 mutex_lock(&dyn_info_mutex);
5664 r = sprintf(buf, "%ld ", *p);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005665
Steven Rostedta26a2a22008-10-31 00:03:22 -04005666 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005667 buf[r++] = '\n';
5668
5669 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5670
5671 mutex_unlock(&dyn_info_mutex);
5672
5673 return r;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005674}
5675
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005676static const struct file_operations tracing_dyn_info_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005677 .open = tracing_open_generic,
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005678 .read = tracing_read_dyn_info,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005679 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005680};
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005681#endif /* CONFIG_DYNAMIC_FTRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005682
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005683#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5684static void
5685ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005686{
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005687 tracing_snapshot();
5688}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005689
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005690static void
5691ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5692{
5693 unsigned long *count = (long *)data;
5694
5695 if (!*count)
5696 return;
5697
5698 if (*count != -1)
5699 (*count)--;
5700
5701 tracing_snapshot();
5702}
5703
5704static int
5705ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5706 struct ftrace_probe_ops *ops, void *data)
5707{
5708 long count = (long)data;
5709
5710 seq_printf(m, "%ps:", (void *)ip);
5711
5712 seq_printf(m, "snapshot");
5713
5714 if (count == -1)
5715 seq_printf(m, ":unlimited\n");
5716 else
5717 seq_printf(m, ":count=%ld\n", count);
5718
5719 return 0;
5720}
5721
5722static struct ftrace_probe_ops snapshot_probe_ops = {
5723 .func = ftrace_snapshot,
5724 .print = ftrace_snapshot_print,
5725};
5726
5727static struct ftrace_probe_ops snapshot_count_probe_ops = {
5728 .func = ftrace_count_snapshot,
5729 .print = ftrace_snapshot_print,
5730};
5731
5732static int
5733ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
5734 char *glob, char *cmd, char *param, int enable)
5735{
5736 struct ftrace_probe_ops *ops;
5737 void *count = (void *)-1;
5738 char *number;
5739 int ret;
5740
5741 /* hash funcs only work with set_ftrace_filter */
5742 if (!enable)
5743 return -EINVAL;
5744
5745 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
5746
5747 if (glob[0] == '!') {
5748 unregister_ftrace_function_probe_func(glob+1, ops);
5749 return 0;
5750 }
5751
5752 if (!param)
5753 goto out_reg;
5754
5755 number = strsep(&param, ":");
5756
5757 if (!strlen(number))
5758 goto out_reg;
5759
5760 /*
5761 * We use the callback data field (which is a pointer)
5762 * as our counter.
5763 */
5764 ret = kstrtoul(number, 0, (unsigned long *)&count);
5765 if (ret)
5766 return ret;
5767
5768 out_reg:
5769 ret = register_ftrace_function_probe(glob, ops, count);
5770
5771 if (ret >= 0)
5772 alloc_snapshot(&global_trace);
5773
5774 return ret < 0 ? ret : 0;
5775}
5776
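ftrace_trace_snapshot_callback() above parses commands of the form glob:snapshot[:count] written into set_ftrace_filter. A sketch arming a one-shot snapshot on the first hit of schedule(); the path is assumed, and both CONFIG_DYNAMIC_FTRACE and CONFIG_TRACER_SNAPSHOT must be enabled:

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
    	/* glob:cmd[:count], as parsed by the callback above. */
    	const char *cmd = "schedule:snapshot:1";
    	int fd = open("/sys/kernel/debug/tracing/set_ftrace_filter", O_WRONLY);

    	if (fd < 0)
    		return 1;
    	write(fd, cmd, strlen(cmd));
    	close(fd);
    	return 0;
    }
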
5777static struct ftrace_func_command ftrace_snapshot_cmd = {
5778 .name = "snapshot",
5779 .func = ftrace_trace_snapshot_callback,
5780};
5781
Tom Zanussi38de93a2013-10-24 08:34:18 -05005782static __init int register_snapshot_cmd(void)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005783{
5784 return register_ftrace_command(&ftrace_snapshot_cmd);
5785}
5786#else
Tom Zanussi38de93a2013-10-24 08:34:18 -05005787static inline __init int register_snapshot_cmd(void) { return 0; }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005788#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005789
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005790struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005791{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005792 if (tr->dir)
5793 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005794
Frederic Weisbecker3e1f60b2009-03-22 23:10:45 +01005795 if (!debugfs_initialized())
5796 return NULL;
5797
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005798 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
5799 tr->dir = debugfs_create_dir("tracing", NULL);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005800
zhangwei(Jovi)687c8782013-03-11 15:13:29 +08005801 if (!tr->dir)
5802 pr_warn_once("Could not create debugfs directory 'tracing'\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005803
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005804 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005805}
5806
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005807struct dentry *tracing_init_dentry(void)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005808{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005809 return tracing_init_dentry_tr(&global_trace);
5810}
5811
5812static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
5813{
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005814 struct dentry *d_tracer;
5815
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005816 if (tr->percpu_dir)
5817 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005818
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005819 d_tracer = tracing_init_dentry_tr(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005820 if (!d_tracer)
5821 return NULL;
5822
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005823 tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005824
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005825 WARN_ONCE(!tr->percpu_dir,
5826 "Could not create debugfs directory 'per_cpu/%d'\n", cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005827
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005828 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005829}
5830
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005831static struct dentry *
5832trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
5833 void *data, long cpu, const struct file_operations *fops)
5834{
5835 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
5836
5837 if (ret) /* See tracing_get_cpu() */
5838 ret->d_inode->i_cdev = (void *)(cpu + 1);
5839 return ret;
5840}
5841
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005842static void
5843tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005844{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005845 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005846 struct dentry *d_cpu;
Steven Rostedtdd49a382010-10-20 21:51:26 -04005847 char cpu_dir[30]; /* 30 characters should be more than enough */
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005848
Namhyung Kim0a3d7ce2012-04-23 10:11:57 +09005849 if (!d_percpu)
5850 return;
5851
Steven Rostedtdd49a382010-10-20 21:51:26 -04005852 snprintf(cpu_dir, 30, "cpu%ld", cpu);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01005853 d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
5854 if (!d_cpu) {
5855 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
5856 return;
5857 }
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005858
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01005859 /* per cpu trace_pipe */
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005860 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
Oleg Nesterov15544202013-07-23 17:25:57 +02005861 tr, cpu, &tracing_pipe_fops);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005862
5863 /* per cpu trace */
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005864 trace_create_cpu_file("trace", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02005865 tr, cpu, &tracing_fops);
Steven Rostedt7f96f932009-03-13 00:37:42 -04005866
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005867 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005868 tr, cpu, &tracing_buffers_fops);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005869
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005870 trace_create_cpu_file("stats", 0444, d_cpu,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005871 tr, cpu, &tracing_stats_fops);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005872
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005873 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005874 tr, cpu, &tracing_entries_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005875
5876#ifdef CONFIG_TRACER_SNAPSHOT
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005877 trace_create_cpu_file("snapshot", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02005878 tr, cpu, &snapshot_fops);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005879
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005880 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005881 tr, cpu, &snapshot_raw_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005882#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005883}
5884
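tracing_init_debugfs_percpu() above yields one directory per CPU; with CONFIG_TRACER_SNAPSHOT enabled the layout looks like this (cpu0 shown, files in creation order):

    per_cpu/cpu0/
        trace_pipe
        trace
        trace_pipe_raw
        stats
        buffer_size_kb
        snapshot
        snapshot_raw
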
Steven Rostedt60a11772008-05-12 21:20:44 +02005885#ifdef CONFIG_FTRACE_SELFTEST
5886/* Let selftest have access to static functions in this file */
5887#include "trace_selftest.c"
5888#endif
5889
Steven Rostedt577b7852009-02-26 23:43:05 -05005890struct trace_option_dentry {
5891 struct tracer_opt *opt;
5892 struct tracer_flags *flags;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005893 struct trace_array *tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05005894 struct dentry *entry;
5895};
5896
5897static ssize_t
5898trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
5899 loff_t *ppos)
5900{
5901 struct trace_option_dentry *topt = filp->private_data;
5902 char *buf;
5903
5904 if (topt->flags->val & topt->opt->bit)
5905 buf = "1\n";
5906 else
5907 buf = "0\n";
5908
5909 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5910}
5911
5912static ssize_t
5913trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
5914 loff_t *ppos)
5915{
5916 struct trace_option_dentry *topt = filp->private_data;
5917 unsigned long val;
Steven Rostedt577b7852009-02-26 23:43:05 -05005918 int ret;
5919
Peter Huewe22fe9b52011-06-07 21:58:27 +02005920 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5921 if (ret)
Steven Rostedt577b7852009-02-26 23:43:05 -05005922 return ret;
5923
Li Zefan8d18eaa2009-12-08 11:17:06 +08005924 if (val != 0 && val != 1)
Steven Rostedt577b7852009-02-26 23:43:05 -05005925 return -EINVAL;
Li Zefan8d18eaa2009-12-08 11:17:06 +08005926
5927 if (!!(topt->flags->val & topt->opt->bit) != val) {
5928 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05005929 ret = __set_tracer_option(topt->tr, topt->flags,
Steven Rostedtc757bea2009-12-21 22:35:16 -05005930 topt->opt, !val);
Li Zefan8d18eaa2009-12-08 11:17:06 +08005931 mutex_unlock(&trace_types_lock);
5932 if (ret)
5933 return ret;
Steven Rostedt577b7852009-02-26 23:43:05 -05005934 }
5935
5936 *ppos += cnt;
5937
5938 return cnt;
5939}
5940
5941
5942static const struct file_operations trace_options_fops = {
5943 .open = tracing_open_generic,
5944 .read = trace_options_read,
5945 .write = trace_options_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005946 .llseek = generic_file_llseek,
Steven Rostedt577b7852009-02-26 23:43:05 -05005947};
5948
Steven Rostedta8259072009-02-26 22:19:12 -05005949static ssize_t
5950trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
5951 loff_t *ppos)
5952{
5953 long index = (long)filp->private_data;
5954 char *buf;
5955
5956 if (trace_flags & (1 << index))
5957 buf = "1\n";
5958 else
5959 buf = "0\n";
5960
5961 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5962}
5963
5964static ssize_t
5965trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
5966 loff_t *ppos)
5967{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005968 struct trace_array *tr = &global_trace;
Steven Rostedta8259072009-02-26 22:19:12 -05005969 long index = (long)filp->private_data;
Steven Rostedta8259072009-02-26 22:19:12 -05005970 unsigned long val;
5971 int ret;
5972
Peter Huewe22fe9b52011-06-07 21:58:27 +02005973 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5974 if (ret)
Steven Rostedta8259072009-02-26 22:19:12 -05005975 return ret;
5976
Zhaoleif2d84b62009-08-07 18:55:48 +08005977 if (val != 0 && val != 1)
Steven Rostedta8259072009-02-26 22:19:12 -05005978 return -EINVAL;
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04005979
5980 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005981 ret = set_tracer_flag(tr, 1 << index, val);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04005982 mutex_unlock(&trace_types_lock);
Steven Rostedta8259072009-02-26 22:19:12 -05005983
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005984 if (ret < 0)
5985 return ret;
5986
Steven Rostedta8259072009-02-26 22:19:12 -05005987 *ppos += cnt;
5988
5989 return cnt;
5990}
5991
Steven Rostedta8259072009-02-26 22:19:12 -05005992static const struct file_operations trace_options_core_fops = {
5993 .open = tracing_open_generic,
5994 .read = trace_options_core_read,
5995 .write = trace_options_core_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005996 .llseek = generic_file_llseek,
Steven Rostedta8259072009-02-26 22:19:12 -05005997};
5998
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005999struct dentry *trace_create_file(const char *name,
Al Virof4ae40a2011-07-24 04:33:43 -04006000 umode_t mode,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006001 struct dentry *parent,
6002 void *data,
6003 const struct file_operations *fops)
6004{
6005 struct dentry *ret;
6006
6007 ret = debugfs_create_file(name, mode, parent, data, fops);
6008 if (!ret)
6009 pr_warning("Could not create debugfs '%s' entry\n", name);
6010
6011 return ret;
6012}
6013
6014
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006015static struct dentry *trace_options_init_dentry(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05006016{
6017 struct dentry *d_tracer;
Steven Rostedta8259072009-02-26 22:19:12 -05006018
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006019 if (tr->options)
6020 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05006021
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006022 d_tracer = tracing_init_dentry_tr(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05006023 if (!d_tracer)
6024 return NULL;
6025
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006026 tr->options = debugfs_create_dir("options", d_tracer);
6027 if (!tr->options) {
Steven Rostedta8259072009-02-26 22:19:12 -05006028 pr_warning("Could not create debugfs directory 'options'\n");
6029 return NULL;
6030 }
6031
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006032 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05006033}
6034
Steven Rostedt577b7852009-02-26 23:43:05 -05006035static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006036create_trace_option_file(struct trace_array *tr,
6037 struct trace_option_dentry *topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05006038 struct tracer_flags *flags,
6039 struct tracer_opt *opt)
6040{
6041 struct dentry *t_options;
Steven Rostedt577b7852009-02-26 23:43:05 -05006042
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006043 t_options = trace_options_init_dentry(tr);
Steven Rostedt577b7852009-02-26 23:43:05 -05006044 if (!t_options)
6045 return;
6046
6047 topt->flags = flags;
6048 topt->opt = opt;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006049 topt->tr = tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05006050
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006051 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05006052 &trace_options_fops);
6053
Steven Rostedt577b7852009-02-26 23:43:05 -05006054}
6055
6056static struct trace_option_dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006057create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
Steven Rostedt577b7852009-02-26 23:43:05 -05006058{
6059 struct trace_option_dentry *topts;
6060 struct tracer_flags *flags;
6061 struct tracer_opt *opts;
6062 int cnt;
6063
6064 if (!tracer)
6065 return NULL;
6066
6067 flags = tracer->flags;
6068
6069 if (!flags || !flags->opts)
6070 return NULL;
6071
6072 opts = flags->opts;
6073
6074 for (cnt = 0; opts[cnt].name; cnt++)
6075 ;
6076
Steven Rostedt0cfe8242009-02-27 10:51:10 -05006077 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
Steven Rostedt577b7852009-02-26 23:43:05 -05006078 if (!topts)
6079 return NULL;
6080
6081 for (cnt = 0; opts[cnt].name; cnt++)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006082 create_trace_option_file(tr, &topts[cnt], flags,
Steven Rostedt577b7852009-02-26 23:43:05 -05006083 &opts[cnt]);
6084
6085 return topts;
6086}
6087
6088static void
6089destroy_trace_option_files(struct trace_option_dentry *topts)
6090{
6091 int cnt;
6092
6093 if (!topts)
6094 return;
6095
6096 for (cnt = 0; topts[cnt].opt; cnt++) {
6097 if (topts[cnt].entry)
6098 debugfs_remove(topts[cnt].entry);
6099 }
6100
6101 kfree(topts);
6102}
6103
static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options, (void *)index,
				 &trace_options_core_fops);
}

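/*
 * Populate the "options" directory with one file per core trace
 * option at boot time.
 */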
static __init void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++)
		create_trace_option_core_file(tr, trace_options[i], i);
}

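/*
 * Read handler for the "tracing_on" file: reports 1 or 0
 * depending on whether the ring buffer is currently recording.
 */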
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

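/*
 * Write handler for "tracing_on": a non-zero value turns the ring
 * buffer (and the current tracer, if it has start/stop hooks) on,
 * zero turns it off.
 */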
static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic_tr,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};

struct dentry *trace_instance_dir;

static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);

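/*
 * Allocate one trace buffer: the ring buffer itself plus the
 * per-CPU bookkeeping data that goes with it.
 */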
static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}

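/*
 * Allocate the buffers of a trace array: the main buffer and,
 * when CONFIG_TRACER_MAX_TRACE is set, the snapshot (max) buffer.
 * The snapshot buffer stays at a single page unless a snapshot
 * was requested on the kernel command line.
 */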
static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		ring_buffer_free(tr->trace_buffer.buffer);
		free_percpu(tr->trace_buffer.data);
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}

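/* Free one trace buffer and its per-CPU data, if allocated. */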
static void free_trace_buffer(struct trace_buffer *buf)
{
	if (buf->buffer) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		free_percpu(buf->data);
		buf->data = NULL;
	}
}

static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}

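/*
 * Create a new trace instance: allocate and initialize a
 * trace_array, give it buffers and a debugfs directory, and add
 * it to the global list. Fails with -EEXIST if an instance of
 * that name already exists.
 */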
static int new_instance_create(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = debugfs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		debugfs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	init_tracer_debugfs(tr, tr->dir);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

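/*
 * Remove a named trace instance, provided it exists and nothing
 * holds a reference to it, and free everything it owned.
 */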
static int instance_delete(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	ret = -EBUSY;
	if (tr->ref)
		goto out_unlock;

	list_del(&tr->list);

	tracing_set_nop(tr);
	event_trace_del_tracer(tr);
	ftrace_destroy_function_files(tr);
	debugfs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

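/*
 * mkdir hook for the "instances" directory: a mkdir there creates
 * a whole new trace instance named after the new directory.
 */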
static int instance_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/*
	 * The inode mutex is locked, but debugfs_create_dir() will also
	 * take the mutex. As the instances directory cannot be destroyed
	 * or changed in any other way, it is safe to unlock it, and
	 * let the dentry try. If two users try to make the same dir at
	 * the same time, then new_instance_create() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = new_instance_create(dentry->d_iname);

	mutex_lock(&inode->i_mutex);

	return ret;
}

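/*
 * rmdir hook for the "instances" directory: removing a directory
 * tears down the corresponding trace instance.
 */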
static int instance_rmdir(struct inode *inode, struct dentry *dentry)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/* The caller did a dget() on dentry */
	mutex_unlock(&dentry->d_inode->i_mutex);

	/*
	 * The inode mutex is locked, but the debugfs removal code will
	 * also take the mutex. As the instances directory cannot be
	 * destroyed or changed in any other way, it is safe to unlock
	 * it, and let the dentry try. If two users try to remove the
	 * same dir at the same time, then instance_delete() will
	 * determine the winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = instance_delete(dentry->d_iname);

	mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
	mutex_lock(&dentry->d_inode->i_mutex);

	return ret;
}

static const struct inode_operations instance_dir_inode_operations = {
	.lookup		= simple_lookup,
	.mkdir		= instance_mkdir,
	.rmdir		= instance_rmdir,
};

static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = debugfs_create_dir("instances", d_tracer);
	if (WARN_ON(!trace_instance_dir))
		return;

	/* Hijack the dir inode operations, to allow mkdir */
	trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
}

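/*
 * Create the per-instance debugfs control files. This is used
 * both for the top level trace directory and for each instance
 * directory beneath "instances".
 */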
static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			  &tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_debugfs_percpu(tr, cpu);
}

static __init int tracer_init_debugfs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	init_tracer_debugfs(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			  &tracing_thresh, &tracing_max_lat_fops);

	trace_create_file("README", 0444, d_tracer,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			  NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			  &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	create_trace_options_dir(&global_trace);

	return 0;
}

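/*
 * When the kernel panics or oopses and ftrace_dump_on_oops is
 * set, dump the ftrace buffer to the console so the trace
 * survives in the crash output.
 */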
static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call	= trace_panic_handler,
	.next		= NULL,
	.priority	= 150	/* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call	= trace_die_handler,
	.priority	= 200
};

/*
 * printk is set to a max of 1024; we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->len >= TRACE_MAX_PRINT)
		s->len = TRACE_MAX_PRINT;

	/* Should be zero terminated, but we are paranoid. */
	s->buffer[s->len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

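/*
 * Set up an iterator over the global trace buffer, reading all
 * CPUs. Used by ftrace_dump() and other in-kernel dumpers rather
 * than the file-based readers.
 */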
void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}

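/*
 * Dump the ftrace ring buffer to the console. Tracing is turned
 * off first, user-memory decoding is disabled, and only one dump
 * may run at a time. oops_dump_mode selects whether all CPUs or
 * only the current one are printed.
 */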
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
	}

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUs to read the
	 * next buffer. This is a bit expensive, but is
	 * not done often. We print all that we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);

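/*
 * Early-boot initialization of the tracing core: allocate the
 * cpumasks and the global trace buffers, register the nop tracer,
 * hook the panic and die notifiers, and apply any boot-time
 * trace options.
 */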
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* Used for event triggers */
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_free_cpumask;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUs */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warning("Trace clock %s not defined, going back to default\n",
				   trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	while (trace_boot_options) {
		char *option;

		option = strsep(&trace_boot_options, ",");
		trace_set_options(&global_trace, option);
	}

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The default bootup tracer's name is stored in an init section.
	 * This function is called at late_initcall time. If we did not
	 * find the boot tracer, then clear it out, to prevent
	 * a later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);