blob: 4a343db45d4e452b29fcdd14a4c58f9cde1d0884 [file] [log] [blame]
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001/*
2 * ring buffer based function tracer
3 *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
Nadia Yvette Chambers6d49e352012-12-06 10:39:54 +010012 * Copyright (C) 2004 Nadia Yvette Chambers
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020013 */
Steven Rostedt2cadf912008-12-01 22:20:19 -050014#include <linux/ring_buffer.h>
Sam Ravnborg273b2812009-10-18 00:52:28 +020015#include <generated/utsrelease.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050016#include <linux/stacktrace.h>
17#include <linux/writeback.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020018#include <linux/kallsyms.h>
19#include <linux/seq_file.h>
Steven Rostedt3f5a54e2008-07-30 22:36:46 -040020#include <linux/notifier.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050021#include <linux/irqflags.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020022#include <linux/debugfs.h>
Steven Rostedt4c11d7a2008-05-12 21:20:43 +020023#include <linux/pagemap.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020024#include <linux/hardirq.h>
25#include <linux/linkage.h>
26#include <linux/uaccess.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050027#include <linux/kprobes.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020028#include <linux/ftrace.h>
29#include <linux/module.h>
30#include <linux/percpu.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050031#include <linux/splice.h>
Steven Rostedt3f5a54e2008-07-30 22:36:46 -040032#include <linux/kdebug.h>
Frederic Weisbecker5f0c6c02009-03-27 14:22:10 +010033#include <linux/string.h>
Lai Jiangshan7e53bd42010-01-06 20:08:50 +080034#include <linux/rwsem.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090035#include <linux/slab.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020036#include <linux/ctype.h>
37#include <linux/init.h>
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +020038#include <linux/poll.h>
Steven Rostedtb892e5c2012-03-01 22:06:48 -050039#include <linux/nmi.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020040#include <linux/fs.h>
Clark Williams8bd75c72013-02-07 09:47:07 -060041#include <linux/sched/rt.h>
Ingo Molnar86387f72008-05-12 21:20:51 +020042
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020043#include "trace.h"
Steven Rostedtf0868d12008-12-23 23:24:12 -050044#include "trace_output.h"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020045
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +010046/*
Steven Rostedt73c51622009-03-11 13:42:01 -040047 * On boot up, the ring buffer is set to the minimum size, so that
48 * we do not waste memory on systems that are not using tracing.
49 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -050050bool ring_buffer_expanded;
Steven Rostedt73c51622009-03-11 13:42:01 -040051
52/*
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +010053 * We need to change this state when a selftest is running.
Frederic Weisbeckerff325042008-12-04 23:47:35 +010054 * A selftest will lurk into the ring-buffer to count the
55 * entries inserted during the selftest although some concurrent
Ingo Molnar5e1607a2009-03-05 10:24:48 +010056 * insertions into the ring-buffer such as trace_printk could occurred
Frederic Weisbeckerff325042008-12-04 23:47:35 +010057 * at the same time, giving false positive or negative results.
58 */
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +010059static bool __read_mostly tracing_selftest_running;
Frederic Weisbeckerff325042008-12-04 23:47:35 +010060
Steven Rostedtb2821ae2009-02-02 21:38:32 -050061/*
62 * If a tracer is running, we do not want to run SELFTEST.
63 */
Li Zefan020e5f82009-07-01 10:47:05 +080064bool __read_mostly tracing_selftest_disabled;
Steven Rostedtb2821ae2009-02-02 21:38:32 -050065
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +010066/* For tracers that don't implement custom flags */
67static struct tracer_opt dummy_tracer_opt[] = {
68 { }
69};
70
71static struct tracer_flags dummy_tracer_flags = {
72 .val = 0,
73 .opts = dummy_tracer_opt
74};
75
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -050076static int
77dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +010078{
79 return 0;
80}
Steven Rostedt0f048702008-11-05 16:05:44 -050081
82/*
Steven Rostedt7ffbd482012-10-11 12:14:25 -040083 * To prevent the comm cache from being overwritten when no
84 * tracing is active, only save the comm when a trace event
85 * occurred.
86 */
87static DEFINE_PER_CPU(bool, trace_cmdline_save);
88
89/*
Steven Rostedt0f048702008-11-05 16:05:44 -050090 * Kill all tracing for good (never come back).
91 * It is initialized to 1 but will turn to zero if the initialization
92 * of the tracer is successful. But that is the only place that sets
93 * this back to zero.
94 */
Hannes Eder4fd27352009-02-10 19:44:12 +010095static int tracing_disabled = 1;
Steven Rostedt0f048702008-11-05 16:05:44 -050096
Christoph Lameter9288f992009-10-07 19:17:45 -040097DEFINE_PER_CPU(int, ftrace_cpu_disabled);
Steven Rostedtd7690412008-10-01 00:29:53 -040098
Jason Wessel955b61e2010-08-05 09:22:23 -050099cpumask_var_t __read_mostly tracing_buffer_mask;
Steven Rostedtab464282008-05-12 21:21:00 +0200100
Steven Rostedt944ac422008-10-23 19:26:08 -0400101/*
102 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
103 *
104 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
105 * is set, then ftrace_dump is called. This will output the contents
106 * of the ftrace buffers to the console. This is very useful for
107 * capturing traces that lead to crashes and outputing it to a
108 * serial console.
109 *
110 * It is default off, but you can enable it with either specifying
111 * "ftrace_dump_on_oops" in the kernel command line, or setting
Frederic Weisbeckercecbca92010-04-18 19:08:41 +0200112 * /proc/sys/kernel/ftrace_dump_on_oops
113 * Set 1 if you want to dump buffers of all CPUs
114 * Set 2 if you want to dump the buffer of the CPU that triggered oops
Steven Rostedt944ac422008-10-23 19:26:08 -0400115 */
Frederic Weisbeckercecbca92010-04-18 19:08:41 +0200116
117enum ftrace_dump_mode ftrace_dump_on_oops;
Steven Rostedt944ac422008-10-23 19:26:08 -0400118
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400119/* When set, tracing will stop when a WARN*() is hit */
120int __disable_trace_on_warning;
121
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -0500122static int tracing_set_tracer(struct trace_array *tr, const char *buf);
Steven Rostedtb2821ae2009-02-02 21:38:32 -0500123
Li Zefanee6c2c12009-09-18 14:06:47 +0800124#define MAX_TRACER_SIZE 100
125static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
Steven Rostedtb2821ae2009-02-02 21:38:32 -0500126static char *default_bootup_tracer;
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100127
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500128static bool allocate_snapshot;
129
Frederic Weisbecker1beee962009-10-14 20:50:32 +0200130static int __init set_cmdline_ftrace(char *str)
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100131{
Chen Gang67012ab2013-04-08 12:06:44 +0800132 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
Steven Rostedtb2821ae2009-02-02 21:38:32 -0500133 default_bootup_tracer = bootup_tracer_buf;
Steven Rostedt73c51622009-03-11 13:42:01 -0400134 /* We are using ftrace early, expand it */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500135 ring_buffer_expanded = true;
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100136 return 1;
137}
Frederic Weisbecker1beee962009-10-14 20:50:32 +0200138__setup("ftrace=", set_cmdline_ftrace);
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100139
Steven Rostedt944ac422008-10-23 19:26:08 -0400140static int __init set_ftrace_dump_on_oops(char *str)
141{
Frederic Weisbeckercecbca92010-04-18 19:08:41 +0200142 if (*str++ != '=' || !*str) {
143 ftrace_dump_on_oops = DUMP_ALL;
144 return 1;
145 }
146
147 if (!strcmp("orig_cpu", str)) {
148 ftrace_dump_on_oops = DUMP_ORIG;
149 return 1;
150 }
151
152 return 0;
Steven Rostedt944ac422008-10-23 19:26:08 -0400153}
154__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
Steven Rostedt60a11772008-05-12 21:20:44 +0200155
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400156static int __init stop_trace_on_warning(char *str)
157{
158 __disable_trace_on_warning = 1;
159 return 1;
160}
161__setup("traceoff_on_warning=", stop_trace_on_warning);
162
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -0400163static int __init boot_alloc_snapshot(char *str)
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500164{
165 allocate_snapshot = true;
166 /* We also need the main ring buffer expanded */
167 ring_buffer_expanded = true;
168 return 1;
169}
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -0400170__setup("alloc_snapshot", boot_alloc_snapshot);
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500171
Steven Rostedt7bcfaf52012-11-01 22:56:07 -0400172
173static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
174static char *trace_boot_options __initdata;
175
176static int __init set_trace_boot_options(char *str)
177{
Chen Gang67012ab2013-04-08 12:06:44 +0800178 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
Steven Rostedt7bcfaf52012-11-01 22:56:07 -0400179 trace_boot_options = trace_boot_options_buf;
180 return 0;
181}
182__setup("trace_options=", set_trace_boot_options);
183
Steven Rostedte1e232c2014-02-10 23:38:46 -0500184static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
185static char *trace_boot_clock __initdata;
186
187static int __init set_trace_boot_clock(char *str)
188{
189 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
190 trace_boot_clock = trace_boot_clock_buf;
191 return 0;
192}
193__setup("trace_clock=", set_trace_boot_clock);
194
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400195
Lai Jiangshancf8e3472009-03-30 13:48:00 +0800196unsigned long long ns2usecs(cycle_t nsec)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200197{
198 nsec += 500;
199 do_div(nsec, 1000);
200 return nsec;
201}
202
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200203/*
204 * The global_trace is the descriptor that holds the tracing
205 * buffers for the live tracing. For each CPU, it contains
206 * a link list of pages that will store trace entries. The
207 * page descriptor of the pages in the memory is used to hold
208 * the link list by linking the lru item in the page descriptor
209 * to each of the pages in the buffer per CPU.
210 *
211 * For each active CPU there is a data field that holds the
212 * pages for the buffer for that CPU. Each CPU has the same number
213 * of pages allocated for its buffer.
214 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200215static struct trace_array global_trace;
216
Steven Rostedtae63b312012-05-03 23:09:03 -0400217LIST_HEAD(ftrace_trace_arrays);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200218
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -0400219int trace_array_get(struct trace_array *this_tr)
220{
221 struct trace_array *tr;
222 int ret = -ENODEV;
223
224 mutex_lock(&trace_types_lock);
225 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
226 if (tr == this_tr) {
227 tr->ref++;
228 ret = 0;
229 break;
230 }
231 }
232 mutex_unlock(&trace_types_lock);
233
234 return ret;
235}
236
237static void __trace_array_put(struct trace_array *this_tr)
238{
239 WARN_ON(!this_tr->ref);
240 this_tr->ref--;
241}
242
243void trace_array_put(struct trace_array *this_tr)
244{
245 mutex_lock(&trace_types_lock);
246 __trace_array_put(this_tr);
247 mutex_unlock(&trace_types_lock);
248}
249
Tom Zanussif306cc82013-10-24 08:34:17 -0500250int filter_check_discard(struct ftrace_event_file *file, void *rec,
251 struct ring_buffer *buffer,
252 struct ring_buffer_event *event)
Tom Zanussieb02ce02009-04-08 03:15:54 -0500253{
Tom Zanussif306cc82013-10-24 08:34:17 -0500254 if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
255 !filter_match_preds(file->filter, rec)) {
256 ring_buffer_discard_commit(buffer, event);
257 return 1;
258 }
259
260 return 0;
Tom Zanussieb02ce02009-04-08 03:15:54 -0500261}
Tom Zanussif306cc82013-10-24 08:34:17 -0500262EXPORT_SYMBOL_GPL(filter_check_discard);
263
264int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
265 struct ring_buffer *buffer,
266 struct ring_buffer_event *event)
267{
268 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
269 !filter_match_preds(call->filter, rec)) {
270 ring_buffer_discard_commit(buffer, event);
271 return 1;
272 }
273
274 return 0;
275}
276EXPORT_SYMBOL_GPL(call_filter_check_discard);
Tom Zanussieb02ce02009-04-08 03:15:54 -0500277
Fabian Frederickad1438a2014-04-17 21:44:42 +0200278static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
Steven Rostedt37886f62009-03-17 17:22:06 -0400279{
280 u64 ts;
281
282 /* Early boot up does not have a buffer yet */
Alexander Z Lam94571582013-08-02 18:36:16 -0700283 if (!buf->buffer)
Steven Rostedt37886f62009-03-17 17:22:06 -0400284 return trace_clock_local();
285
Alexander Z Lam94571582013-08-02 18:36:16 -0700286 ts = ring_buffer_time_stamp(buf->buffer, cpu);
287 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
Steven Rostedt37886f62009-03-17 17:22:06 -0400288
289 return ts;
290}
291
Alexander Z Lam94571582013-08-02 18:36:16 -0700292cycle_t ftrace_now(int cpu)
293{
294 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
295}
296
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400297/**
298 * tracing_is_enabled - Show if global_trace has been disabled
299 *
300 * Shows if the global trace has been enabled or not. It uses the
301 * mirror flag "buffer_disabled" to be used in fast paths such as for
302 * the irqsoff tracer. But it may be inaccurate due to races. If you
303 * need to know the accurate state, use tracing_is_on() which is a little
304 * slower, but accurate.
305 */
Steven Rostedt90369902008-11-05 16:05:44 -0500306int tracing_is_enabled(void)
307{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400308 /*
309 * For quick access (irqsoff uses this in fast path), just
310 * return the mirror variable of the state of the ring buffer.
311 * It's a little racy, but we don't really care.
312 */
313 smp_rmb();
314 return !global_trace.buffer_disabled;
Steven Rostedt90369902008-11-05 16:05:44 -0500315}
316
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200317/*
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400318 * trace_buf_size is the size in bytes that is allocated
319 * for a buffer. Note, the number of bytes is always rounded
320 * to page size.
Steven Rostedt3f5a54e2008-07-30 22:36:46 -0400321 *
322 * This number is purposely set to a low number of 16384.
323 * If the dump on oops happens, it will be much appreciated
324 * to not have to wait for all that output. Anyway this can be
325 * boot time and run time configurable.
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200326 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400327#define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
Steven Rostedt3f5a54e2008-07-30 22:36:46 -0400328
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400329static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200330
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200331/* trace_types holds a link list of available tracers. */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200332static struct tracer *trace_types __read_mostly;
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200333
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200334/*
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200335 * trace_types_lock is used to protect the trace_types list.
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200336 */
Alexander Z Lama8227412013-07-01 19:37:54 -0700337DEFINE_MUTEX(trace_types_lock);
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200338
Lai Jiangshan7e53bd42010-01-06 20:08:50 +0800339/*
340 * serialize the access of the ring buffer
341 *
342 * ring buffer serializes readers, but it is low level protection.
343 * The validity of the events (which returns by ring_buffer_peek() ..etc)
344 * are not protected by ring buffer.
345 *
346 * The content of events may become garbage if we allow other process consumes
347 * these events concurrently:
348 * A) the page of the consumed events may become a normal page
349 * (not reader page) in ring buffer, and this page will be rewrited
350 * by events producer.
351 * B) The page of the consumed events may become a page for splice_read,
352 * and this page will be returned to system.
353 *
354 * These primitives allow multi process access to different cpu ring buffer
355 * concurrently.
356 *
357 * These primitives don't distinguish read-only and read-consume access.
358 * Multi read-only access are also serialized.
359 */
360
361#ifdef CONFIG_SMP
362static DECLARE_RWSEM(all_cpu_access_lock);
363static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
364
365static inline void trace_access_lock(int cpu)
366{
Steven Rostedtae3b5092013-01-23 15:22:59 -0500367 if (cpu == RING_BUFFER_ALL_CPUS) {
Lai Jiangshan7e53bd42010-01-06 20:08:50 +0800368 /* gain it for accessing the whole ring buffer. */
369 down_write(&all_cpu_access_lock);
370 } else {
371 /* gain it for accessing a cpu ring buffer. */
372
Steven Rostedtae3b5092013-01-23 15:22:59 -0500373 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
Lai Jiangshan7e53bd42010-01-06 20:08:50 +0800374 down_read(&all_cpu_access_lock);
375
376 /* Secondly block other access to this @cpu ring buffer. */
377 mutex_lock(&per_cpu(cpu_access_lock, cpu));
378 }
379}
380
381static inline void trace_access_unlock(int cpu)
382{
Steven Rostedtae3b5092013-01-23 15:22:59 -0500383 if (cpu == RING_BUFFER_ALL_CPUS) {
Lai Jiangshan7e53bd42010-01-06 20:08:50 +0800384 up_write(&all_cpu_access_lock);
385 } else {
386 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
387 up_read(&all_cpu_access_lock);
388 }
389}
390
391static inline void trace_access_lock_init(void)
392{
393 int cpu;
394
395 for_each_possible_cpu(cpu)
396 mutex_init(&per_cpu(cpu_access_lock, cpu));
397}
398
399#else
400
401static DEFINE_MUTEX(access_lock);
402
403static inline void trace_access_lock(int cpu)
404{
405 (void)cpu;
406 mutex_lock(&access_lock);
407}
408
409static inline void trace_access_unlock(int cpu)
410{
411 (void)cpu;
412 mutex_unlock(&access_lock);
413}
414
415static inline void trace_access_lock_init(void)
416{
417}
418
419#endif
420
Steven Rostedtee6bce52008-11-12 17:52:37 -0500421/* trace_flags holds trace_options default values */
Steven Rostedt12ef7d42008-11-12 17:52:38 -0500422unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
Steven Rostedta2a16d62009-03-24 23:17:58 -0400423 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
Steven Rostedt77271ce2011-11-17 09:34:33 -0500424 TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
Steven Rostedt (Red Hat)328df472013-03-14 12:10:40 -0400425 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;
Vaibhav Nagarnaike7e2ee82011-05-10 13:27:21 -0700426
Steven Rostedt (Red Hat)5280bce2013-07-02 19:59:57 -0400427static void tracer_tracing_on(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400428{
429 if (tr->trace_buffer.buffer)
430 ring_buffer_record_on(tr->trace_buffer.buffer);
431 /*
432 * This flag is looked at when buffers haven't been allocated
433 * yet, or by some tracers (like irqsoff), that just want to
434 * know if the ring buffer has been disabled, but it can handle
435 * races of where it gets disabled but we still do a record.
436 * As the check is in the fast path of the tracers, it is more
437 * important to be fast than accurate.
438 */
439 tr->buffer_disabled = 0;
440 /* Make the flag seen by readers */
441 smp_wmb();
442}
443
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200444/**
Steven Rostedt499e5472012-02-22 15:50:28 -0500445 * tracing_on - enable tracing buffers
446 *
447 * This function enables tracing buffers that may have been
448 * disabled with tracing_off.
449 */
450void tracing_on(void)
451{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400452 tracer_tracing_on(&global_trace);
Steven Rostedt499e5472012-02-22 15:50:28 -0500453}
454EXPORT_SYMBOL_GPL(tracing_on);
455
456/**
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500457 * __trace_puts - write a constant string into the trace buffer.
458 * @ip: The address of the caller
459 * @str: The constant string to write
460 * @size: The size of the string.
461 */
462int __trace_puts(unsigned long ip, const char *str, int size)
463{
464 struct ring_buffer_event *event;
465 struct ring_buffer *buffer;
466 struct print_entry *entry;
467 unsigned long irq_flags;
468 int alloc;
469
Steven Rostedt (Red Hat)3132e102014-01-23 12:27:59 -0500470 if (unlikely(tracing_selftest_running || tracing_disabled))
471 return 0;
472
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500473 alloc = sizeof(*entry) + size + 2; /* possible \n added */
474
475 local_save_flags(irq_flags);
476 buffer = global_trace.trace_buffer.buffer;
477 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
478 irq_flags, preempt_count());
479 if (!event)
480 return 0;
481
482 entry = ring_buffer_event_data(event);
483 entry->ip = ip;
484
485 memcpy(&entry->buf, str, size);
486
487 /* Add a newline if necessary */
488 if (entry->buf[size - 1] != '\n') {
489 entry->buf[size] = '\n';
490 entry->buf[size + 1] = '\0';
491 } else
492 entry->buf[size] = '\0';
493
494 __buffer_unlock_commit(buffer, event);
495
496 return size;
497}
498EXPORT_SYMBOL_GPL(__trace_puts);
499
500/**
501 * __trace_bputs - write the pointer to a constant string into trace buffer
502 * @ip: The address of the caller
503 * @str: The constant string to write to the buffer to
504 */
505int __trace_bputs(unsigned long ip, const char *str)
506{
507 struct ring_buffer_event *event;
508 struct ring_buffer *buffer;
509 struct bputs_entry *entry;
510 unsigned long irq_flags;
511 int size = sizeof(struct bputs_entry);
512
Steven Rostedt (Red Hat)3132e102014-01-23 12:27:59 -0500513 if (unlikely(tracing_selftest_running || tracing_disabled))
514 return 0;
515
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500516 local_save_flags(irq_flags);
517 buffer = global_trace.trace_buffer.buffer;
518 event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
519 irq_flags, preempt_count());
520 if (!event)
521 return 0;
522
523 entry = ring_buffer_event_data(event);
524 entry->ip = ip;
525 entry->str = str;
526
527 __buffer_unlock_commit(buffer, event);
528
529 return 1;
530}
531EXPORT_SYMBOL_GPL(__trace_bputs);
532
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500533#ifdef CONFIG_TRACER_SNAPSHOT
534/**
535 * trace_snapshot - take a snapshot of the current buffer.
536 *
537 * This causes a swap between the snapshot buffer and the current live
538 * tracing buffer. You can use this to take snapshots of the live
539 * trace when some condition is triggered, but continue to trace.
540 *
541 * Note, make sure to allocate the snapshot with either
542 * a tracing_snapshot_alloc(), or by doing it manually
543 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
544 *
545 * If the snapshot buffer is not allocated, it will stop tracing.
546 * Basically making a permanent snapshot.
547 */
548void tracing_snapshot(void)
549{
550 struct trace_array *tr = &global_trace;
551 struct tracer *tracer = tr->current_trace;
552 unsigned long flags;
553
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -0500554 if (in_nmi()) {
555 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
556 internal_trace_puts("*** snapshot is being ignored ***\n");
557 return;
558 }
559
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500560 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)ca268da2013-03-09 00:40:58 -0500561 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
562 internal_trace_puts("*** stopping trace here! ***\n");
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500563 tracing_off();
564 return;
565 }
566
567 /* Note, snapshot can not be used when the tracer uses it */
568 if (tracer->use_max_tr) {
Steven Rostedt (Red Hat)ca268da2013-03-09 00:40:58 -0500569 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
570 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500571 return;
572 }
573
574 local_irq_save(flags);
575 update_max_tr(tr, current, smp_processor_id());
576 local_irq_restore(flags);
577}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -0500578EXPORT_SYMBOL_GPL(tracing_snapshot);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500579
580static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
581 struct trace_buffer *size_buf, int cpu_id);
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -0400582static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
583
584static int alloc_snapshot(struct trace_array *tr)
585{
586 int ret;
587
588 if (!tr->allocated_snapshot) {
589
590 /* allocate spare buffer */
591 ret = resize_buffer_duplicate_size(&tr->max_buffer,
592 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
593 if (ret < 0)
594 return ret;
595
596 tr->allocated_snapshot = true;
597 }
598
599 return 0;
600}
601
Fabian Frederickad1438a2014-04-17 21:44:42 +0200602static void free_snapshot(struct trace_array *tr)
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -0400603{
604 /*
605 * We don't free the ring buffer. instead, resize it because
606 * The max_tr ring buffer has some state (e.g. ring->clock) and
607 * we want preserve it.
608 */
609 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
610 set_buffer_entries(&tr->max_buffer, 1);
611 tracing_reset_online_cpus(&tr->max_buffer);
612 tr->allocated_snapshot = false;
613}
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500614
615/**
Tom Zanussi93e31ff2013-10-24 08:59:26 -0500616 * tracing_alloc_snapshot - allocate snapshot buffer.
617 *
618 * This only allocates the snapshot buffer if it isn't already
619 * allocated - it doesn't also take a snapshot.
620 *
621 * This is meant to be used in cases where the snapshot buffer needs
622 * to be set up for events that can't sleep but need to be able to
623 * trigger a snapshot.
624 */
625int tracing_alloc_snapshot(void)
626{
627 struct trace_array *tr = &global_trace;
628 int ret;
629
630 ret = alloc_snapshot(tr);
631 WARN_ON(ret < 0);
632
633 return ret;
634}
635EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
636
637/**
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500638 * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
639 *
640 * This is similar to trace_snapshot(), but it will allocate the
641 * snapshot buffer if it isn't already allocated. Use this only
642 * where it is safe to sleep, as the allocation may sleep.
643 *
644 * This causes a swap between the snapshot buffer and the current live
645 * tracing buffer. You can use this to take snapshots of the live
646 * trace when some condition is triggered, but continue to trace.
647 */
648void tracing_snapshot_alloc(void)
649{
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500650 int ret;
651
Tom Zanussi93e31ff2013-10-24 08:59:26 -0500652 ret = tracing_alloc_snapshot();
653 if (ret < 0)
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -0400654 return;
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500655
656 tracing_snapshot();
657}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -0500658EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500659#else
660void tracing_snapshot(void)
661{
662 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
663}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -0500664EXPORT_SYMBOL_GPL(tracing_snapshot);
Tom Zanussi93e31ff2013-10-24 08:59:26 -0500665int tracing_alloc_snapshot(void)
666{
667 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
668 return -ENODEV;
669}
670EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500671void tracing_snapshot_alloc(void)
672{
673 /* Give warning */
674 tracing_snapshot();
675}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -0500676EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500677#endif /* CONFIG_TRACER_SNAPSHOT */
678
Steven Rostedt (Red Hat)5280bce2013-07-02 19:59:57 -0400679static void tracer_tracing_off(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400680{
681 if (tr->trace_buffer.buffer)
682 ring_buffer_record_off(tr->trace_buffer.buffer);
683 /*
684 * This flag is looked at when buffers haven't been allocated
685 * yet, or by some tracers (like irqsoff), that just want to
686 * know if the ring buffer has been disabled, but it can handle
687 * races of where it gets disabled but we still do a record.
688 * As the check is in the fast path of the tracers, it is more
689 * important to be fast than accurate.
690 */
691 tr->buffer_disabled = 1;
692 /* Make the flag seen by readers */
693 smp_wmb();
694}
695
Steven Rostedt499e5472012-02-22 15:50:28 -0500696/**
697 * tracing_off - turn off tracing buffers
698 *
699 * This function stops the tracing buffers from recording data.
700 * It does not disable any overhead the tracers themselves may
701 * be causing. This function simply causes all recording to
702 * the ring buffers to fail.
703 */
704void tracing_off(void)
705{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400706 tracer_tracing_off(&global_trace);
Steven Rostedt499e5472012-02-22 15:50:28 -0500707}
708EXPORT_SYMBOL_GPL(tracing_off);
709
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400710void disable_trace_on_warning(void)
711{
712 if (__disable_trace_on_warning)
713 tracing_off();
714}
715
Steven Rostedt499e5472012-02-22 15:50:28 -0500716/**
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400717 * tracer_tracing_is_on - show real state of ring buffer enabled
718 * @tr : the trace array to know if ring buffer is enabled
719 *
720 * Shows real state of the ring buffer if it is enabled or not.
721 */
Steven Rostedt (Red Hat)5280bce2013-07-02 19:59:57 -0400722static int tracer_tracing_is_on(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400723{
724 if (tr->trace_buffer.buffer)
725 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
726 return !tr->buffer_disabled;
727}
728
Steven Rostedt499e5472012-02-22 15:50:28 -0500729/**
730 * tracing_is_on - show state of ring buffers enabled
731 */
732int tracing_is_on(void)
733{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400734 return tracer_tracing_is_on(&global_trace);
Steven Rostedt499e5472012-02-22 15:50:28 -0500735}
736EXPORT_SYMBOL_GPL(tracing_is_on);
737
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400738static int __init set_buf_size(char *str)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200739{
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400740 unsigned long buf_size;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +0200741
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200742 if (!str)
743 return 0;
Li Zefan9d612be2009-06-24 17:33:15 +0800744 buf_size = memparse(str, &str);
Steven Rostedtc6caeeb2008-05-12 21:21:00 +0200745 /* nr_entries can not be zero */
Li Zefan9d612be2009-06-24 17:33:15 +0800746 if (buf_size == 0)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +0200747 return 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400748 trace_buf_size = buf_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200749 return 1;
750}
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400751__setup("trace_buf_size=", set_buf_size);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200752
Tim Bird0e950172010-02-25 15:36:43 -0800753static int __init set_tracing_thresh(char *str)
754{
Wang Tianhong87abb3b2012-08-02 14:02:00 +0800755 unsigned long threshold;
Tim Bird0e950172010-02-25 15:36:43 -0800756 int ret;
757
758 if (!str)
759 return 0;
Daniel Walterbcd83ea2012-09-26 22:08:38 +0200760 ret = kstrtoul(str, 0, &threshold);
Tim Bird0e950172010-02-25 15:36:43 -0800761 if (ret < 0)
762 return 0;
Wang Tianhong87abb3b2012-08-02 14:02:00 +0800763 tracing_thresh = threshold * 1000;
Tim Bird0e950172010-02-25 15:36:43 -0800764 return 1;
765}
766__setup("tracing_thresh=", set_tracing_thresh);
767
/* Convert a nanosecond count to whole microseconds (truncating). */
unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	unsigned long usecs = nsecs;

	usecs /= 1000;
	return usecs;
}
772
/*
 * Names of the user-visible trace options.  These must match the bit
 * positions in trace_iterator_flags — do not reorder without updating
 * that enum.  The list is NULL-terminated.
 */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	"record-cmd",
	"overwrite",
	"disable_on_free",
	"irq-info",
	"markers",
	"function-trace",
	NULL
};
803
/*
 * Table of selectable trace clocks.  Each entry pairs a clock callback
 * with its user-visible name; ARCH_TRACE_CLOCKS may append arch-specific
 * entries.
 */
static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns; /* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local, "local", 1 },
	{ trace_clock_global, "global", 1 },
	{ trace_clock_counter, "counter", 0 },
	{ trace_clock_jiffies, "uptime", 1 },
	{ trace_clock, "perf", 1 },
	ARCH_TRACE_CLOCKS
};
816
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +0200817/*
818 * trace_parser_get_init - gets the buffer for trace parser
819 */
820int trace_parser_get_init(struct trace_parser *parser, int size)
821{
822 memset(parser, 0, sizeof(*parser));
823
824 parser->buffer = kmalloc(size, GFP_KERNEL);
825 if (!parser->buffer)
826 return 1;
827
828 parser->size = size;
829 return 0;
830}
831
/*
 * trace_parser_put - frees the buffer for trace parser
 *
 * Counterpart to trace_parser_get_init(); safe even if the buffer
 * pointer is NULL (kfree(NULL) is a no-op).
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}
839
/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	/* A fresh read (offset 0) starts the parser from scratch */
	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		/* keep one byte free for the NUL terminator */
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		/* token continues in the next write; stash the last char */
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
925
Dmitri Vorobievb8b94262009-03-22 19:11:11 +0200926static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +0200927{
928 int len;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +0200929
930 if (s->len <= s->readpos)
931 return -EBUSY;
932
933 len = s->len - s->readpos;
934 if (cnt > len)
935 cnt = len;
Dan Carpenter5a26c8f2012-04-20 09:31:45 +0300936 memcpy(buf, s->buffer + s->readpos, cnt);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +0200937
Steven Rostedte74da522009-03-04 20:31:11 -0500938 s->readpos += cnt;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +0200939 return cnt;
940}
941
Tim Bird0e950172010-02-25 15:36:43 -0800942unsigned long __read_mostly tracing_thresh;
943
#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 *
 * Records per-cpu latency data plus the offending task's identity
 * (comm/pid/uid/scheduling class) into tr->max_buffer.
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this tasks comm */
	tracing_record_cmdline(tsk);
}
983
/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 *
 * Caller must have interrupts disabled (enforced by WARN_ON_ONCE below).
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	/* Swap the live buffer with the snapshot buffer by pointer */
	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
1018
/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001063
/*
 * Block until data is available in the iterator's per-cpu ring buffer.
 * Returns 0 immediately for static (already filled or empty) iterators.
 */
static int wait_on_pipe(struct trace_iterator *iter)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
}
1072
#ifdef CONFIG_FTRACE_STARTUP_TEST
/*
 * Run a tracer's self-test against the global trace array.  Returns 0
 * if the tracer has no selftest, selftests are disabled, or the test
 * passes; -1 on failure (the tracer then is not registered).
 */
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
/* Selftests compiled out: always succeed */
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001138
/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.  Rejects unnamed, over-long-named, and
 * duplicate tracers; runs the tracer's selftest before linking it into
 * the trace_types list.  Returns 0 on success, negative/-1 on failure.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	/* Reject duplicate registrations by name */
	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	/* Fill in default no-op flag handling if the tracer supplied none */
	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	/* Link the new tracer at the head of the list */
	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}
1213
/*
 * Reset a single CPU's ring buffer in @buf, disabling recording around
 * the reset and waiting for in-flight commits to finish first.
 */
void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}
1229
/*
 * Reset the ring buffers of all online CPUs in @buf and restamp the
 * buffer start time.  Recording is disabled for the duration.
 */
void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}
1250
/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	/* Reset every trace array's buffers (and snapshot buffer, if any) */
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}
1263
#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
/*
 * Ring of recently-seen task comms, indexed both ways:
 * pid -> slot (fixed-size array) and slot -> pid (allocated to
 * cmdline_num entries).  Unused entries hold NO_CMDLINE_MAP.
 */
struct saved_cmdlines_buffer {
	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
	unsigned *map_cmdline_to_pid;
	unsigned cmdline_num;
	int cmdline_idx;	/* last slot written */
	char *saved_cmdlines;	/* cmdline_num * TASK_COMM_LEN bytes */
};
static struct saved_cmdlines_buffer *savedcmd;

/* temporary disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001278
/* Return the comm slot for entry @idx in the saved-cmdlines ring */
static inline char *get_saved_cmdlines(int idx)
{
	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
}

/* Store @cmdline (TASK_COMM_LEN bytes) into slot @idx */
static inline void set_cmdline(int idx, const char *cmdline)
{
	memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
}
1288
1289static int allocate_cmdlines_buffer(unsigned int val,
1290 struct saved_cmdlines_buffer *s)
1291{
1292 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1293 GFP_KERNEL);
1294 if (!s->map_cmdline_to_pid)
1295 return -ENOMEM;
1296
1297 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1298 if (!s->saved_cmdlines) {
1299 kfree(s->map_cmdline_to_pid);
1300 return -ENOMEM;
1301 }
1302
1303 s->cmdline_idx = 0;
1304 s->cmdline_num = val;
1305 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1306 sizeof(s->map_pid_to_cmdline));
1307 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1308 val * sizeof(*s->map_cmdline_to_pid));
1309
1310 return 0;
1311}
1312
/*
 * Allocate and initialize the global savedcmd buffer with the default
 * number of entries.  Returns 0 on success, -ENOMEM on failure (in
 * which case savedcmd is left NULL).
 */
static int trace_create_savedcmd(void)
{
	int ret;

	savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
	if (!savedcmd)
		return -ENOMEM;

	ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
	if (ret < 0) {
		kfree(savedcmd);
		savedcmd = NULL;
		return -ENOMEM;
	}

	return 0;
}
1330
/* Nonzero while the global trace array is stopped via tracing_stop() */
int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}
1335
/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 *
 * Start/stop calls nest: recording is only re-enabled when the
 * stop_count drops back to zero.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}
1378
/*
 * Per-instance variant of tracing_start().  The global array is
 * delegated to tracing_start() so its max buffer is handled too.
 */
static void tracing_start_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
1409
/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 *
 * Calls nest: only the first stop (count 0 -> 1) actually disables
 * recording; later calls just bump the count.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (global_trace.stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}
1443
/*
 * Per-instance variant of tracing_stop().  The global array is
 * delegated to tracing_stop() so its max buffer is handled too.
 */
static void tracing_stop_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	/* If global, we need to also stop the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_stop();

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	if (tr->stop_count++)
		goto out;

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
1464
Ingo Molnare309b412008-05-12 21:20:51 +02001465void trace_stop_cmdline_recording(void);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001466
/*
 * Record @tsk's comm in the saved-cmdlines ring so pid -> comm lookups
 * work later.  Best-effort: returns 0 without recording if the pid is
 * out of range or the cmdline lock is contended, 1 on success.
 */
static int trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return 0;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!arch_spin_trylock(&trace_cmdline_lock))
		return 0;

	idx = savedcmd->map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		/* Pid not seen before: claim the next slot in the ring */
		idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = savedcmd->map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
		savedcmd->map_pid_to_cmdline[tsk->pid] = idx;

		savedcmd->cmdline_idx = idx;
	}

	set_cmdline(idx, tsk->comm);

	arch_spin_unlock(&trace_cmdline_lock);

	return 1;
}
1509
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001510static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001511{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001512 unsigned map;
1513
Steven Rostedt4ca53082009-03-16 19:20:15 -04001514 if (!pid) {
1515 strcpy(comm, "<idle>");
1516 return;
1517 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001518
Steven Rostedt74bf4072010-01-25 15:11:53 -05001519 if (WARN_ON_ONCE(pid < 0)) {
1520 strcpy(comm, "<XXX>");
1521 return;
1522 }
1523
Steven Rostedt4ca53082009-03-16 19:20:15 -04001524 if (pid > PID_MAX_DEFAULT) {
1525 strcpy(comm, "<...>");
1526 return;
1527 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001528
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001529 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001530 if (map != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001531 strcpy(comm, get_saved_cmdlines(map));
Thomas Gleixner50d88752009-03-18 08:58:44 +01001532 else
1533 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001534}
1535
/*
 * trace_find_cmdline - look up the saved comm for @pid and copy it
 * into @comm.
 *
 * Takes trace_cmdline_lock around the lookup; preemption is disabled
 * first so the arch spin lock is not held across a reschedule.
 */
void trace_find_cmdline(int pid, char comm[])
{
	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	__trace_find_cmdline(pid, comm);

	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}
1546
Ingo Molnare309b412008-05-12 21:20:51 +02001547void tracing_record_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001548{
Steven Rostedt0fb96562012-05-11 14:25:30 -04001549 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001550 return;
1551
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001552 if (!__this_cpu_read(trace_cmdline_save))
1553 return;
1554
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001555 if (trace_save_cmdline(tsk))
1556 __this_cpu_write(trace_cmdline_save, false);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001557}
1558
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03001559void
Steven Rostedt38697052008-10-01 13:14:09 -04001560tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1561 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001562{
1563 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001564
Steven Rostedt777e2082008-09-29 23:02:42 -04001565 entry->preempt_count = pc & 0xff;
1566 entry->pid = (tsk) ? tsk->pid : 0;
1567 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04001568#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04001569 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04001570#else
1571 TRACE_FLAG_IRQS_NOSUPPORT |
1572#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001573 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1574 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02001575 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1576 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001577}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02001578EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001579
Steven Rostedte77405a2009-09-02 14:17:06 -04001580struct ring_buffer_event *
1581trace_buffer_lock_reserve(struct ring_buffer *buffer,
1582 int type,
1583 unsigned long len,
1584 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001585{
1586 struct ring_buffer_event *event;
1587
Steven Rostedte77405a2009-09-02 14:17:06 -04001588 event = ring_buffer_lock_reserve(buffer, len);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001589 if (event != NULL) {
1590 struct trace_entry *ent = ring_buffer_event_data(event);
1591
1592 tracing_generic_entry_update(ent, flags, pc);
1593 ent->type = type;
1594 }
1595
1596 return event;
1597}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001598
/*
 * Commit a previously reserved event to the ring buffer, and flag on
 * this cpu that a new entry was recorded so the next
 * tracing_record_cmdline() call saves the task's comm.
 */
void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_cmdline_save, true);
	ring_buffer_unlock_commit(buffer, event);
}
1605
/*
 * Commit @event and, depending on trace options, append the kernel
 * and user stack traces that accompany it. The skip of 6 drops the
 * tracing-internal frames from the kernel stack dump.
 */
static inline void
__trace_buffer_unlock_commit(struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
			     unsigned long flags, int pc)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack(buffer, flags, 6, pc);
	ftrace_trace_userstack(buffer, flags, pc);
}
1616
/*
 * Exported wrapper around __trace_buffer_unlock_commit(): commit the
 * event and record the optional stack traces.
 */
void trace_buffer_unlock_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001624
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001625static struct ring_buffer *temp_buffer;
1626
Steven Rostedtef5580d2009-02-27 19:38:04 -05001627struct ring_buffer_event *
Steven Rostedtccb469a2012-08-02 10:32:10 -04001628trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1629 struct ftrace_event_file *ftrace_file,
1630 int type, unsigned long len,
1631 unsigned long flags, int pc)
1632{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001633 struct ring_buffer_event *entry;
1634
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001635 *current_rb = ftrace_file->tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001636 entry = trace_buffer_lock_reserve(*current_rb,
Steven Rostedtccb469a2012-08-02 10:32:10 -04001637 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001638 /*
1639 * If tracing is off, but we have triggers enabled
1640 * we still need to look at the event data. Use the temp_buffer
1641 * to store the trace event for the tigger to use. It's recusive
1642 * safe and will not be recorded anywhere.
1643 */
1644 if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
1645 *current_rb = temp_buffer;
1646 entry = trace_buffer_lock_reserve(*current_rb,
1647 type, len, flags, pc);
1648 }
1649 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04001650}
1651EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1652
1653struct ring_buffer_event *
Steven Rostedte77405a2009-09-02 14:17:06 -04001654trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1655 int type, unsigned long len,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001656 unsigned long flags, int pc)
1657{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001658 *current_rb = global_trace.trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04001659 return trace_buffer_lock_reserve(*current_rb,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001660 type, len, flags, pc);
1661}
Steven Rostedt94487d62009-05-05 19:22:53 -04001662EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001663
/*
 * Commit an event reserved via trace_current_buffer_lock_reserve(),
 * including the optional kernel/user stack traces.
 */
void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
					struct ring_buffer_event *event,
					unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001671
/*
 * Like trace_buffer_unlock_commit(), but the kernel stack trace is
 * taken from the supplied @regs (e.g. from a kprobe) rather than the
 * current stack, with no frames skipped.
 */
void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001683
/*
 * Throw away a reserved-but-unwanted event, releasing its ring buffer
 * space instead of committing it.
 */
void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event)
{
	ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001690
Ingo Molnare309b412008-05-12 21:20:51 +02001691void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001692trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04001693 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1694 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001695{
Tom Zanussie1112b42009-03-31 00:48:49 -05001696 struct ftrace_event_call *call = &event_function;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001697 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001698 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001699 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001700
Steven Rostedtd7690412008-10-01 00:29:53 -04001701 /* If we are reading the ring buffer, don't trace */
Rusty Russelldd17c8f2009-10-29 22:34:15 +09001702 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
Steven Rostedtd7690412008-10-01 00:29:53 -04001703 return;
1704
Steven Rostedte77405a2009-09-02 14:17:06 -04001705 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001706 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001707 if (!event)
1708 return;
1709 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04001710 entry->ip = ip;
1711 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05001712
Tom Zanussif306cc82013-10-24 08:34:17 -05001713 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001714 __buffer_unlock_commit(buffer, event);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001715}
1716
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001717#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001718
/* As many stack entries as fit in one page */
#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
struct ftrace_stack {
	unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
};

/*
 * Per-cpu scratch area for capturing deep stack traces, and the
 * per-cpu reservation counter guarding it (see __ftrace_trace_stack()).
 */
static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1726
/*
 * Capture a kernel stack trace and record it as a TRACE_STACK event
 * on @buffer. If @regs is non-NULL the trace starts from those
 * registers instead of the current frame; @skip drops that many
 * leading (tracing-internal) frames.
 *
 * The first context on a cpu to get here borrows the large per-cpu
 * ftrace_stack scratch buffer; nested contexts (interrupt/NMI hitting
 * while it is reserved) fall back to capturing at most
 * FTRACE_STACK_ENTRIES directly into the ring buffer event.
 */
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs)
{
	struct ftrace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	int use_stack;
	int size = FTRACE_STACK_ENTRIES;

	trace.nr_entries = 0;
	trace.skip = skip;

	/*
	 * Since events can happen in NMIs there's no safe way to
	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
	 * or NMI comes in, it will just have to use the default
	 * FTRACE_STACK_SIZE.
	 */
	preempt_disable_notrace();

	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
	/*
	 * We don't need any atomic variables, just a barrier.
	 * If an interrupt comes in, we don't care, because it would
	 * have exited and put the counter back to what we want.
	 * We just need a barrier to keep gcc from moving things
	 * around.
	 */
	barrier();
	if (use_stack == 1) {
		/* We own the per-cpu scratch stack: capture into it */
		trace.entries = this_cpu_ptr(ftrace_stack.calls);
		trace.max_entries = FTRACE_STACK_MAX_ENTRIES;

		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);

		if (trace.nr_entries > size)
			size = trace.nr_entries;
	} else
		/* From now on, use_stack is a boolean */
		use_stack = 0;

	size *= sizeof(unsigned long);

	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
					  sizeof(*entry) + size, flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);

	memset(&entry->caller, 0, size);

	if (use_stack)
		/* Copy the already-captured trace out of the scratch stack */
		memcpy(&entry->caller, trace.entries,
		       trace.nr_entries * sizeof(unsigned long));
	else {
		/* Nested context: capture directly into the event */
		trace.max_entries = FTRACE_STACK_ENTRIES;
		trace.entries = entry->caller;
		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);
	}

	entry->size = trace.nr_entries;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out:
	/* Again, don't let gcc optimize things here */
	barrier();
	__this_cpu_dec(ftrace_stack_reserve);
	preempt_enable_notrace();

}
1807
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001808void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1809 int skip, int pc, struct pt_regs *regs)
1810{
1811 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1812 return;
1813
1814 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1815}
1816
Steven Rostedte77405a2009-09-02 14:17:06 -04001817void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1818 int skip, int pc)
Steven Rostedt53614992009-01-15 19:12:40 -05001819{
1820 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1821 return;
1822
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001823 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
Steven Rostedt53614992009-01-15 19:12:40 -05001824}
1825
/*
 * Record a kernel stack trace into @tr's ring buffer unconditionally
 * (no trace_flags check — callers decide).
 */
void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc)
{
	__ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
}
1831
Steven Rostedt03889382009-12-11 09:48:22 -05001832/**
1833 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001834 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05001835 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001836void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05001837{
1838 unsigned long flags;
1839
1840 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05001841 return;
Steven Rostedt03889382009-12-11 09:48:22 -05001842
1843 local_save_flags(flags);
1844
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001845 /*
1846 * Skip 3 more, seems to get us at the caller of
1847 * this function.
1848 */
1849 skip += 3;
1850 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1851 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05001852}
1853
Steven Rostedt91e86e52010-11-10 12:56:12 +01001854static DEFINE_PER_CPU(int, user_stack_count);
1855
/*
 * Record the current task's user-space stack trace as a
 * TRACE_USER_STACK event on @buffer, when the userstacktrace option
 * is enabled. Skipped entirely in NMI context (user stack walking may
 * fault), and guarded per-cpu against recursion since walking the
 * user stack can itself trigger traced events.
 */
void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;

	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	/*
	 * NMIs can not handle page faults, even with fix ups.
	 * The save user stack can (and often does) fault.
	 */
	if (unlikely(in_nmi()))
		return;

	/*
	 * prevent recursion, since the user stack tracing may
	 * trigger other kernel events.
	 */
	preempt_disable();
	if (__this_cpu_read(user_stack_count))
		goto out;

	__this_cpu_inc(user_stack_count);

	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					  sizeof(*entry), flags, pc);
	if (!event)
		goto out_drop_count;
	entry = ring_buffer_event_data(event);

	entry->tgid = current->tgid;
	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries = 0;
	trace.max_entries = FTRACE_STACK_ENTRIES;
	trace.skip = 0;
	trace.entries = entry->caller;

	save_stack_trace_user(&trace);
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out_drop_count:
	__this_cpu_dec(user_stack_count);
 out:
	preempt_enable();
}
1907
#ifdef UNUSED
/* Record the current user stack trace into @tr's ring buffer. */
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
	/*
	 * ftrace_trace_userstack() takes a ring buffer, not a
	 * trace_array; this compiled-out caller was never updated when
	 * that API changed. Pass the array's ring buffer so the code
	 * is correct if UNUSED is ever defined.
	 */
	ftrace_trace_userstack(tr->trace_buffer.buffer, flags, preempt_count());
}
#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02001914
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001915#endif /* CONFIG_STACKTRACE */
1916
/* created for use with alloc_percpu */
struct trace_buffer_struct {
	char buffer[TRACE_BUF_SIZE];
};

/*
 * One scratch buffer per cpu for each execution context (normal,
 * softirq, hardirq, NMI); selected by get_trace_buf(). NULL until
 * alloc_percpu_trace_buffer() succeeds.
 */
static struct trace_buffer_struct *trace_percpu_buffer;
static struct trace_buffer_struct *trace_percpu_sirq_buffer;
static struct trace_buffer_struct *trace_percpu_irq_buffer;
static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1926
1927/*
1928 * The buffer used is dependent on the context. There is a per cpu
1929 * buffer for normal context, softirq contex, hard irq context and
1930 * for NMI context. Thise allows for lockless recording.
1931 *
1932 * Note, if the buffers failed to be allocated, then this returns NULL
1933 */
1934static char *get_trace_buf(void)
1935{
1936 struct trace_buffer_struct *percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04001937
1938 /*
1939 * If we have allocated per cpu buffers, then we do not
1940 * need to do any locking.
1941 */
1942 if (in_nmi())
1943 percpu_buffer = trace_percpu_nmi_buffer;
1944 else if (in_irq())
1945 percpu_buffer = trace_percpu_irq_buffer;
1946 else if (in_softirq())
1947 percpu_buffer = trace_percpu_sirq_buffer;
1948 else
1949 percpu_buffer = trace_percpu_buffer;
1950
1951 if (!percpu_buffer)
1952 return NULL;
1953
Shan Weid8a03492012-11-13 09:53:04 +08001954 return this_cpu_ptr(&percpu_buffer->buffer[0]);
Steven Rostedt07d777f2011-09-22 14:01:55 -04001955}
1956
1957static int alloc_percpu_trace_buffer(void)
1958{
1959 struct trace_buffer_struct *buffers;
1960 struct trace_buffer_struct *sirq_buffers;
1961 struct trace_buffer_struct *irq_buffers;
1962 struct trace_buffer_struct *nmi_buffers;
1963
1964 buffers = alloc_percpu(struct trace_buffer_struct);
1965 if (!buffers)
1966 goto err_warn;
1967
1968 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1969 if (!sirq_buffers)
1970 goto err_sirq;
1971
1972 irq_buffers = alloc_percpu(struct trace_buffer_struct);
1973 if (!irq_buffers)
1974 goto err_irq;
1975
1976 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
1977 if (!nmi_buffers)
1978 goto err_nmi;
1979
1980 trace_percpu_buffer = buffers;
1981 trace_percpu_sirq_buffer = sirq_buffers;
1982 trace_percpu_irq_buffer = irq_buffers;
1983 trace_percpu_nmi_buffer = nmi_buffers;
1984
1985 return 0;
1986
1987 err_nmi:
1988 free_percpu(irq_buffers);
1989 err_irq:
1990 free_percpu(sirq_buffers);
1991 err_sirq:
1992 free_percpu(buffers);
1993 err_warn:
1994 WARN(1, "Could not allocate percpu trace_printk buffer");
1995 return -ENOMEM;
1996}
1997
Steven Rostedt81698832012-10-11 10:15:05 -04001998static int buffers_allocated;
1999
Steven Rostedt07d777f2011-09-22 14:01:55 -04002000void trace_printk_init_buffers(void)
2001{
Steven Rostedt07d777f2011-09-22 14:01:55 -04002002 if (buffers_allocated)
2003 return;
2004
2005 if (alloc_percpu_trace_buffer())
2006 return;
2007
Steven Rostedt2184db42014-05-28 13:14:40 -04002008 /* trace_printk() is for debug use only. Don't use it in production. */
2009
2010 pr_warning("\n**********************************************************\n");
2011 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2012 pr_warning("** **\n");
2013 pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
2014 pr_warning("** **\n");
2015 pr_warning("** This means that this is a DEBUG kernel and it is **\n");
2016 pr_warning("** unsafe for produciton use. **\n");
2017 pr_warning("** **\n");
2018 pr_warning("** If you see this message and you are not debugging **\n");
2019 pr_warning("** the kernel, report this immediately to your vendor! **\n");
2020 pr_warning("** **\n");
2021 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2022 pr_warning("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04002023
Steven Rostedtb382ede62012-10-10 21:44:34 -04002024 /* Expand the buffers to set size */
2025 tracing_update_buffers();
2026
Steven Rostedt07d777f2011-09-22 14:01:55 -04002027 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04002028
2029 /*
2030 * trace_printk_init_buffers() can be called by modules.
2031 * If that happens, then we need to start cmdline recording
2032 * directly here. If the global_trace.buffer is already
2033 * allocated here, then this was called by module code.
2034 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002035 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04002036 tracing_start_cmdline_record();
2037}
2038
2039void trace_printk_start_comm(void)
2040{
2041 /* Start tracing comms if trace printk is set */
2042 if (!buffers_allocated)
2043 return;
2044 tracing_start_cmdline_record();
2045}
2046
2047static void trace_printk_start_stop_comm(int enabled)
2048{
2049 if (!buffers_allocated)
2050 return;
2051
2052 if (enabled)
2053 tracing_start_cmdline_record();
2054 else
2055 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002056}
2057
/**
 * trace_vbprintk - write binary msg to tracing buffer
 *
 * Binary-format the varargs with vbin_printf() into a per-cpu scratch
 * buffer and record them, plus the format pointer, as a TRACE_BPRINT
 * event in the global trace buffer. The format string is decoded
 * only at read time.
 *
 * Returns the number of u32 words formatted, or 0 on early exit.
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	struct ftrace_event_call *call = &event_bprint;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_array *tr = &global_trace;
	struct bprint_entry *entry;
	unsigned long flags;
	char *tbuffer;
	int len = 0, size, pc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	/* Preemption off: the per-cpu scratch buffer must stay ours */
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out;
	}

	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);

	/* Drop the record on formatting error or overflow */
	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
		goto out;

	local_save_flags(flags);
	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->fmt = fmt;

	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(buffer, flags, 6, pc);
	}

out:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
2117
/*
 * Format the varargs as text into a per-cpu scratch buffer and record
 * the resulting string as a TRACE_PRINT event on @buffer.
 *
 * Returns the formatted length, or 0 on early exit.
 */
static int
__trace_array_vprintk(struct ring_buffer *buffer,
		      unsigned long ip, const char *fmt, va_list args)
{
	struct ftrace_event_call *call = &event_print;
	struct ring_buffer_event *event;
	int len = 0, size, pc;
	struct print_entry *entry;
	unsigned long flags;
	char *tbuffer;

	if (tracing_disabled || tracing_selftest_running)
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	/* Preemption off: the per-cpu scratch buffer must stay ours */
	preempt_disable_notrace();


	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out;
	}

	len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
	/* Drop the record if it was truncated */
	if (len > TRACE_BUF_SIZE)
		goto out;

	local_save_flags(flags);
	size = sizeof(*entry) + len + 1;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, tbuffer, len);
	entry->buf[len] = '\0';
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(buffer, flags, 6, pc);
	}
 out:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
Steven Rostedt659372d2009-09-03 19:11:07 -04002170
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002171int trace_array_vprintk(struct trace_array *tr,
2172 unsigned long ip, const char *fmt, va_list args)
2173{
2174 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2175}
2176
2177int trace_array_printk(struct trace_array *tr,
2178 unsigned long ip, const char *fmt, ...)
2179{
2180 int ret;
2181 va_list ap;
2182
2183 if (!(trace_flags & TRACE_ITER_PRINTK))
2184 return 0;
2185
2186 va_start(ap, fmt);
2187 ret = trace_array_vprintk(tr, ip, fmt, ap);
2188 va_end(ap);
2189 return ret;
2190}
2191
2192int trace_array_printk_buf(struct ring_buffer *buffer,
2193 unsigned long ip, const char *fmt, ...)
2194{
2195 int ret;
2196 va_list ap;
2197
2198 if (!(trace_flags & TRACE_ITER_PRINTK))
2199 return 0;
2200
2201 va_start(ap, fmt);
2202 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2203 va_end(ap);
2204 return ret;
2205}
2206
Steven Rostedt659372d2009-09-03 19:11:07 -04002207int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2208{
Steven Rostedta813a152009-10-09 01:41:35 -04002209 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04002210}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002211EXPORT_SYMBOL_GPL(trace_vprintk);
2212
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002213static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04002214{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002215 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2216
Steven Rostedt5a90f572008-09-03 17:42:51 -04002217 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002218 if (buf_iter)
2219 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04002220}
2221
Ingo Molnare309b412008-05-12 21:20:51 +02002222static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002223peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2224 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002225{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002226 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002227 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002228
Steven Rostedtd7690412008-10-01 00:29:53 -04002229 if (buf_iter)
2230 event = ring_buffer_iter_peek(buf_iter, ts);
2231 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002232 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002233 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04002234
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002235 if (event) {
2236 iter->ent_size = ring_buffer_event_length(event);
2237 return ring_buffer_event_data(event);
2238 }
2239 iter->ent_size = 0;
2240 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002241}
Steven Rostedtd7690412008-10-01 00:29:53 -04002242
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002243static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002244__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2245 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002246{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002247 struct ring_buffer *buffer = iter->trace_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002248 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08002249 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002250 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002251 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002252 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002253 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002254 int cpu;
2255
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002256 /*
2257 * If we are in a per_cpu trace file, don't bother by iterating over
2258 * all cpu and peek directly.
2259 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002260 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002261 if (ring_buffer_empty_cpu(buffer, cpu_file))
2262 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002263 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002264 if (ent_cpu)
2265 *ent_cpu = cpu_file;
2266
2267 return ent;
2268 }
2269
Steven Rostedtab464282008-05-12 21:21:00 +02002270 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002271
2272 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002273 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002274
Steven Rostedtbc21b472010-03-31 19:49:26 -04002275 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002276
Ingo Molnarcdd31cd2008-05-12 21:20:46 +02002277 /*
2278 * Pick the entry with the smallest timestamp:
2279 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002280 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002281 next = ent;
2282 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002283 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002284 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002285 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002286 }
2287 }
2288
Steven Rostedt12b5da32012-03-27 10:43:28 -04002289 iter->ent_size = next_size;
2290
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002291 if (ent_cpu)
2292 *ent_cpu = next_cpu;
2293
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002294 if (ent_ts)
2295 *ent_ts = next_ts;
2296
Steven Rostedtbc21b472010-03-31 19:49:26 -04002297 if (missing_events)
2298 *missing_events = next_lost;
2299
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002300 return next;
2301}
2302
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002303/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002304struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2305 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002306{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002307 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002308}
Ingo Molnar8c523a92008-05-12 21:20:46 +02002309
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002310/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05002311void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002312{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002313 iter->ent = __find_next_entry(iter, &iter->cpu,
2314 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02002315
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002316 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002317 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002318
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002319 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02002320}
2321
Ingo Molnare309b412008-05-12 21:20:51 +02002322static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002323{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002324 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002325 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002326}
2327
Ingo Molnare309b412008-05-12 21:20:51 +02002328static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002329{
2330 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002331 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002332 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002333
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002334 WARN_ON_ONCE(iter->leftover);
2335
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002336 (*pos)++;
2337
2338 /* can't go backwards */
2339 if (iter->idx > i)
2340 return NULL;
2341
2342 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05002343 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002344 else
2345 ent = iter;
2346
2347 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05002348 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002349
2350 iter->pos = *pos;
2351
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002352 return ent;
2353}
2354
Jason Wessel955b61e2010-08-05 09:22:23 -05002355void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002356{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002357 struct ring_buffer_event *event;
2358 struct ring_buffer_iter *buf_iter;
2359 unsigned long entries = 0;
2360 u64 ts;
2361
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002362 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002363
Steven Rostedt6d158a82012-06-27 20:46:14 -04002364 buf_iter = trace_buffer_iter(iter, cpu);
2365 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002366 return;
2367
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002368 ring_buffer_iter_reset(buf_iter);
2369
2370 /*
2371 * We could have the case with the max latency tracers
2372 * that a reset never took place on a cpu. This is evident
2373 * by the timestamp being before the start of the buffer.
2374 */
2375 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002376 if (ts >= iter->trace_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002377 break;
2378 entries++;
2379 ring_buffer_read(buf_iter, NULL);
2380 }
2381
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002382 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002383}
2384
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002385/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002386 * The current tracer is copied to avoid a global locking
2387 * all around.
2388 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002389static void *s_start(struct seq_file *m, loff_t *pos)
2390{
2391 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002392 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002393 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002394 void *p = NULL;
2395 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002396 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002397
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09002398 /*
2399 * copy the tracer to avoid using a global lock all around.
2400 * iter->trace is a copy of current_trace, the pointer to the
2401 * name may be used instead of a strcmp(), as iter->trace->name
2402 * will point to the same string as current_trace->name.
2403 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002404 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002405 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2406 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002407 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002408
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002409#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002410 if (iter->snapshot && iter->trace->use_max_tr)
2411 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002412#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002413
2414 if (!iter->snapshot)
2415 atomic_inc(&trace_record_cmdline_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002416
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002417 if (*pos != iter->pos) {
2418 iter->ent = NULL;
2419 iter->cpu = 0;
2420 iter->idx = -1;
2421
Steven Rostedtae3b5092013-01-23 15:22:59 -05002422 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002423 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002424 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002425 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002426 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002427
Lai Jiangshanac91d852010-03-02 17:54:50 +08002428 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002429 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2430 ;
2431
2432 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002433 /*
2434 * If we overflowed the seq_file before, then we want
2435 * to just reuse the trace_seq buffer again.
2436 */
2437 if (iter->leftover)
2438 p = iter;
2439 else {
2440 l = *pos - 1;
2441 p = s_next(m, p, &l);
2442 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002443 }
2444
Lai Jiangshan4f535962009-05-18 19:35:34 +08002445 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002446 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002447 return p;
2448}
2449
/*
 * seq_file ->stop() callback: undo what s_start() did.
 *
 * Re-enables cmdline recording (unless this is a snapshot read) and
 * drops the per-cpu access lock and the event-read lock in the reverse
 * order they were taken.  The early return mirrors s_start()'s EBUSY
 * path, where none of those locks were acquired.
 */
static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* matches the ERR_PTR(-EBUSY) bail-out in s_start() */
	if (iter->snapshot && iter->trace->use_max_tr)
		return;
#endif

	if (!iter->snapshot)
		atomic_dec(&trace_record_cmdline_disabled);

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
}
2465
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002466static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002467get_total_entries(struct trace_buffer *buf,
2468 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002469{
2470 unsigned long count;
2471 int cpu;
2472
2473 *total = 0;
2474 *entries = 0;
2475
2476 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002477 count = ring_buffer_entries_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002478 /*
2479 * If this buffer has skipped entries, then we hold all
2480 * entries for the trace and we need to ignore the
2481 * ones before the time stamp.
2482 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002483 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2484 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002485 /* total is the same as the entries */
2486 *total += count;
2487 } else
2488 *total += count +
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002489 ring_buffer_overrun_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002490 *entries += count;
2491 }
2492}
2493
Ingo Molnare309b412008-05-12 21:20:51 +02002494static void print_lat_help_header(struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002495{
Michael Ellermana6168352008-08-20 16:36:11 -07002496 seq_puts(m, "# _------=> CPU# \n");
2497 seq_puts(m, "# / _-----=> irqs-off \n");
2498 seq_puts(m, "# | / _----=> need-resched \n");
2499 seq_puts(m, "# || / _---=> hardirq/softirq \n");
2500 seq_puts(m, "# ||| / _--=> preempt-depth \n");
Steven Rostedte6e1e252011-03-09 10:41:56 -05002501 seq_puts(m, "# |||| / delay \n");
2502 seq_puts(m, "# cmd pid ||||| time | caller \n");
2503 seq_puts(m, "# \\ / ||||| \\ | / \n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002504}
2505
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002506static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002507{
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002508 unsigned long total;
2509 unsigned long entries;
2510
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002511 get_total_entries(buf, &total, &entries);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002512 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2513 entries, total, num_online_cpus());
2514 seq_puts(m, "#\n");
2515}
2516
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002517static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002518{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002519 print_event_info(buf, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002520 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
Michael Ellermana6168352008-08-20 16:36:11 -07002521 seq_puts(m, "# | | | | |\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002522}
2523
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002524static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt77271ce2011-11-17 09:34:33 -05002525{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002526 print_event_info(buf, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002527 seq_puts(m, "# _-----=> irqs-off\n");
2528 seq_puts(m, "# / _----=> need-resched\n");
2529 seq_puts(m, "# | / _---=> hardirq/softirq\n");
2530 seq_puts(m, "# || / _--=> preempt-depth\n");
2531 seq_puts(m, "# ||| / delay\n");
2532 seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n");
2533 seq_puts(m, "# | | | |||| | |\n");
2534}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002535
Jiri Olsa62b915f2010-04-02 19:01:22 +02002536void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002537print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2538{
2539 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002540 struct trace_buffer *buf = iter->trace_buffer;
2541 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002542 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002543 unsigned long entries;
2544 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002545 const char *name = "preemption";
2546
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05002547 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002548
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002549 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002550
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002551 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002552 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002553 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002554 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002555 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002556 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02002557 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002558 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02002559 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002560 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002561#if defined(CONFIG_PREEMPT_NONE)
2562 "server",
2563#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2564 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04002565#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002566 "preempt",
2567#else
2568 "unknown",
2569#endif
2570 /* These are reserved for later use */
2571 0, 0, 0, 0);
2572#ifdef CONFIG_SMP
2573 seq_printf(m, " #P:%d)\n", num_online_cpus());
2574#else
2575 seq_puts(m, ")\n");
2576#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002577 seq_puts(m, "# -----------------\n");
2578 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002579 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07002580 data->comm, data->pid,
2581 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002582 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002583 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002584
2585 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002586 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002587 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2588 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002589 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002590 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2591 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04002592 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002593 }
2594
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002595 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002596}
2597
Steven Rostedta3097202008-11-07 22:36:02 -05002598static void test_cpu_buff_start(struct trace_iterator *iter)
2599{
2600 struct trace_seq *s = &iter->seq;
2601
Steven Rostedt12ef7d42008-11-12 17:52:38 -05002602 if (!(trace_flags & TRACE_ITER_ANNOTATE))
2603 return;
2604
2605 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2606 return;
2607
Rusty Russell44623442009-01-01 10:12:23 +10302608 if (cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05002609 return;
2610
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002611 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002612 return;
2613
Rusty Russell44623442009-01-01 10:12:23 +10302614 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02002615
2616 /* Don't print started cpu buffer for the first entry of the trace */
2617 if (iter->idx > 1)
2618 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2619 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05002620}
2621
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002622static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002623{
Steven Rostedt214023c2008-05-12 21:20:46 +02002624 struct trace_seq *s = &iter->seq;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002625 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002626 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002627 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002628
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002629 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002630
Steven Rostedta3097202008-11-07 22:36:02 -05002631 test_cpu_buff_start(iter);
2632
Steven Rostedtf633cef2008-12-23 23:24:13 -05002633 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002634
2635 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt27d48be2009-03-04 21:57:29 -05002636 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2637 if (!trace_print_lat_context(iter))
2638 goto partial;
2639 } else {
2640 if (!trace_print_context(iter))
2641 goto partial;
2642 }
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002643 }
2644
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002645 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04002646 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002647
2648 if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
2649 goto partial;
Steven Rostedt7104f302008-10-01 10:52:51 -04002650
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002651 return TRACE_TYPE_HANDLED;
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002652partial:
2653 return TRACE_TYPE_PARTIAL_LINE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002654}
2655
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002656static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002657{
2658 struct trace_seq *s = &iter->seq;
2659 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002660 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002661
2662 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002663
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002664 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002665 if (!trace_seq_printf(s, "%d %d %llu ",
2666 entry->pid, iter->cpu, iter->ts))
2667 goto partial;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002668 }
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002669
Steven Rostedtf633cef2008-12-23 23:24:13 -05002670 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002671 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04002672 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002673
2674 if (!trace_seq_printf(s, "%d ?\n", entry->type))
2675 goto partial;
Steven Rostedt7104f302008-10-01 10:52:51 -04002676
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002677 return TRACE_TYPE_HANDLED;
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002678partial:
2679 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002680}
2681
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002682static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002683{
2684 struct trace_seq *s = &iter->seq;
2685 unsigned char newline = '\n';
2686 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002687 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002688
2689 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002690
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002691 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2692 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
2693 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
2694 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
2695 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002696
Steven Rostedtf633cef2008-12-23 23:24:13 -05002697 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002698 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04002699 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002700 if (ret != TRACE_TYPE_HANDLED)
2701 return ret;
2702 }
Steven Rostedt7104f302008-10-01 10:52:51 -04002703
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002704 SEQ_PUT_FIELD_RET(s, newline);
2705
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002706 return TRACE_TYPE_HANDLED;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002707}
2708
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002709static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002710{
2711 struct trace_seq *s = &iter->seq;
2712 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002713 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002714
2715 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002716
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002717 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2718 SEQ_PUT_FIELD_RET(s, entry->pid);
Steven Rostedt1830b52d2009-02-07 19:38:43 -05002719 SEQ_PUT_FIELD_RET(s, iter->cpu);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002720 SEQ_PUT_FIELD_RET(s, iter->ts);
2721 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002722
Steven Rostedtf633cef2008-12-23 23:24:13 -05002723 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04002724 return event ? event->funcs->binary(iter, 0, event) :
2725 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002726}
2727
Jiri Olsa62b915f2010-04-02 19:01:22 +02002728int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002729{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002730 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002731 int cpu;
2732
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002733 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002734 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002735 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002736 buf_iter = trace_buffer_iter(iter, cpu);
2737 if (buf_iter) {
2738 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002739 return 0;
2740 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002741 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002742 return 0;
2743 }
2744 return 1;
2745 }
2746
Steven Rostedtab464282008-05-12 21:21:00 +02002747 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04002748 buf_iter = trace_buffer_iter(iter, cpu);
2749 if (buf_iter) {
2750 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04002751 return 0;
2752 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002753 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04002754 return 0;
2755 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002756 }
Steven Rostedtd7690412008-10-01 00:29:53 -04002757
Frederic Weisbecker797d3712008-09-30 18:13:45 +02002758 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002759}
2760
Lai Jiangshan4f535962009-05-18 19:35:34 +08002761/* Called with trace_event_read_lock() held. */
Jason Wessel955b61e2010-08-05 09:22:23 -05002762enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002763{
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002764 enum print_line_t ret;
2765
Jiri Olsaee5e51f2011-03-25 12:05:18 +01002766 if (iter->lost_events &&
2767 !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2768 iter->cpu, iter->lost_events))
2769 return TRACE_TYPE_PARTIAL_LINE;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002770
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002771 if (iter->trace && iter->trace->print_line) {
2772 ret = iter->trace->print_line(iter);
2773 if (ret != TRACE_TYPE_UNHANDLED)
2774 return ret;
2775 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02002776
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05002777 if (iter->ent->type == TRACE_BPUTS &&
2778 trace_flags & TRACE_ITER_PRINTK &&
2779 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2780 return trace_print_bputs_msg_only(iter);
2781
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002782 if (iter->ent->type == TRACE_BPRINT &&
2783 trace_flags & TRACE_ITER_PRINTK &&
2784 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04002785 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002786
Frederic Weisbecker66896a82008-12-13 20:18:13 +01002787 if (iter->ent->type == TRACE_PRINT &&
2788 trace_flags & TRACE_ITER_PRINTK &&
2789 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04002790 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01002791
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002792 if (trace_flags & TRACE_ITER_BIN)
2793 return print_bin_fmt(iter);
2794
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002795 if (trace_flags & TRACE_ITER_HEX)
2796 return print_hex_fmt(iter);
2797
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002798 if (trace_flags & TRACE_ITER_RAW)
2799 return print_raw_fmt(iter);
2800
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002801 return print_trace_fmt(iter);
2802}
2803
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01002804void trace_latency_header(struct seq_file *m)
2805{
2806 struct trace_iterator *iter = m->private;
2807
2808 /* print nothing if the buffers are empty */
2809 if (trace_empty(iter))
2810 return;
2811
2812 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2813 print_trace_header(m, iter);
2814
2815 if (!(trace_flags & TRACE_ITER_VERBOSE))
2816 print_lat_help_header(m);
2817}
2818
Jiri Olsa62b915f2010-04-02 19:01:22 +02002819void trace_default_header(struct seq_file *m)
2820{
2821 struct trace_iterator *iter = m->private;
2822
Jiri Olsaf56e7f82011-06-03 16:58:49 +02002823 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2824 return;
2825
Jiri Olsa62b915f2010-04-02 19:01:22 +02002826 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2827 /* print nothing if the buffers are empty */
2828 if (trace_empty(iter))
2829 return;
2830 print_trace_header(m, iter);
2831 if (!(trace_flags & TRACE_ITER_VERBOSE))
2832 print_lat_help_header(m);
2833 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05002834 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2835 if (trace_flags & TRACE_ITER_IRQ_INFO)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002836 print_func_help_header_irq(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002837 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002838 print_func_help_header(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002839 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02002840 }
2841}
2842
/*
 * Warn in the output when function tracing was killed (ftrace_is_dead()).
 * Use seq_puts() for the constant strings: no format processing needed.
 */
static void test_ftrace_alive(struct seq_file *m)
{
	if (!ftrace_is_dead())
		return;
	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
	seq_puts(m, "# MAY BE MISSING FUNCTION EVENTS\n");
}
2850
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002851#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002852static void show_snapshot_main_help(struct seq_file *m)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002853{
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002854 seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
2855 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2856 seq_printf(m, "# Takes a snapshot of the main buffer.\n");
Wang YanQingb9be6d02013-09-14 12:59:16 +08002857 seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002858 seq_printf(m, "# (Doesn't have to be '2' works with any number that\n");
2859 seq_printf(m, "# is not a '0' or '1')\n");
2860}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002861
/* Usage text shown when reading a per-cpu "snapshot" file. */
static void show_snapshot_percpu_help(struct seq_file *m)
{
	/* Constant strings: seq_puts() avoids pointless format parsing. */
	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
	seq_puts(m, "# Takes a snapshot of the main buffer for this cpu.\n");
#else
	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
	seq_puts(m, "# Must use main snapshot file to allocate.\n");
#endif
	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
	seq_puts(m, "# (Doesn't have to be '2' works with any number that\n");
	seq_puts(m, "# is not a '0' or '1')\n");
}
2876
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002877static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2878{
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05002879 if (iter->tr->allocated_snapshot)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002880 seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
2881 else
2882 seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
2883
2884 seq_printf(m, "# Snapshot commands:\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002885 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2886 show_snapshot_main_help(m);
2887 else
2888 show_snapshot_percpu_help(m);
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002889}
2890#else
2891/* Should never be called */
2892static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2893#endif
2894
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002895static int s_show(struct seq_file *m, void *v)
2896{
2897 struct trace_iterator *iter = v;
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002898 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002899
2900 if (iter->ent == NULL) {
2901 if (iter->tr) {
2902 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2903 seq_puts(m, "#\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04002904 test_ftrace_alive(m);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002905 }
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002906 if (iter->snapshot && trace_empty(iter))
2907 print_snapshot_help(m, iter);
2908 else if (iter->trace && iter->trace->print_header)
Markus Metzger8bba1bf2008-11-25 09:12:31 +01002909 iter->trace->print_header(m);
Jiri Olsa62b915f2010-04-02 19:01:22 +02002910 else
2911 trace_default_header(m);
2912
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002913 } else if (iter->leftover) {
2914 /*
2915 * If we filled the seq_file buffer earlier, we
2916 * want to just show it now.
2917 */
2918 ret = trace_print_seq(m, &iter->seq);
2919
2920 /* ret should this time be zero, but you never know */
2921 iter->leftover = ret;
2922
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002923 } else {
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002924 print_trace_line(iter);
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002925 ret = trace_print_seq(m, &iter->seq);
2926 /*
2927 * If we overflow the seq_file buffer, then it will
2928 * ask us for this data again at start up.
2929 * Use that instead.
2930 * ret is 0 if seq_file write succeeded.
2931 * -1 otherwise.
2932 */
2933 iter->leftover = ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002934 }
2935
2936 return 0;
2937}
2938
Oleg Nesterov649e9c72013-07-23 17:25:54 +02002939/*
2940 * Should be used after trace_array_get(), trace_types_lock
2941 * ensures that i_cdev was already initialized.
2942 */
2943static inline int tracing_get_cpu(struct inode *inode)
2944{
2945 if (inode->i_cdev) /* See trace_create_cpu_file() */
2946 return (long)inode->i_cdev - 1;
2947 return RING_BUFFER_ALL_CPUS;
2948}
2949
/* seq_file callbacks that drive the "trace" file output (see s_show()). */
static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};
2956
/*
 * __tracing_open - common open path for the "trace"/"snapshot" files
 * @inode:	inode of the file; ->i_private is the trace_array
 * @file:	file being opened
 * @snapshot:	true when opening the "snapshot" file
 *
 * Allocates and initializes the trace_iterator that the seq_file
 * machinery (tracer_seq_ops) will drive.  Unless opening the snapshot
 * file, tracing is stopped while the file is open; tracing_release()
 * undoes everything done here.
 *
 * Returns the iterator, or an ERR_PTR() on failure.
 */
static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int cpu;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	/* seq_file owns *iter from here; freed via seq_release_private(). */
	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
	if (!iter)
		return ERR_PTR(-ENOMEM);

	/* One ring-buffer iterator slot per possible cpu. */
	iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
				    GFP_KERNEL);
	if (!iter->buffer_iter)
		goto release;

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	*iter->trace = *tr->current_trace;

	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
		goto fail;

	iter->tr = tr;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Currently only the top directory has a snapshot */
	if (tr->current_trace->print_max || snapshot)
		iter->trace_buffer = &tr->max_buffer;
	else
#endif
		iter->trace_buffer = &tr->trace_buffer;
	iter->snapshot = snapshot;
	iter->pos = -1;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* stop the trace while dumping if we are not opening "snapshot" */
	if (!iter->snapshot)
		tracing_stop_tr(tr);

	/*
	 * Prepare all per-cpu iterators first, sync once, then start
	 * them — the prepare/sync/start split is the ring-buffer API's
	 * required sequence for consistent reads.
	 */
	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter->buffer_iter[cpu] =
				ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		}
		ring_buffer_read_prepare_sync();
		for_each_tracing_cpu(cpu) {
			ring_buffer_read_start(iter->buffer_iter[cpu]);
			tracing_iter_reset(iter, cpu);
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		ring_buffer_read_prepare_sync();
		ring_buffer_read_start(iter->buffer_iter[cpu]);
		tracing_iter_reset(iter, cpu);
	}

	mutex_unlock(&trace_types_lock);

	return iter;

 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
release:
	seq_release_private(inode, file);
	return ERR_PTR(-ENOMEM);
}
3051
3052int tracing_open_generic(struct inode *inode, struct file *filp)
3053{
Steven Rostedt60a11772008-05-12 21:20:44 +02003054 if (tracing_disabled)
3055 return -ENODEV;
3056
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003057 filp->private_data = inode->i_private;
3058 return 0;
3059}
3060
Geyslan G. Bem2e864212013-10-18 21:15:54 -03003061bool tracing_is_disabled(void)
3062{
3063 return (tracing_disabled) ? true: false;
3064}
3065
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003066/*
3067 * Open and update trace_array ref count.
3068 * Must have the current trace_array passed to it.
3069 */
Steven Rostedt (Red Hat)dcc30222013-07-02 20:30:52 -04003070static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003071{
3072 struct trace_array *tr = inode->i_private;
3073
3074 if (tracing_disabled)
3075 return -ENODEV;
3076
3077 if (trace_array_get(tr) < 0)
3078 return -ENODEV;
3079
3080 filp->private_data = inode->i_private;
3081
3082 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003083}
3084
/*
 * tracing_release - release path for the "trace" file
 *
 * Undoes __tracing_open(): finishes the per-cpu ring-buffer iterators,
 * notifies the tracer, restarts tracing (unless this was the snapshot
 * file), drops the trace_array reference, and frees the iterator.
 * Write-only opens never created an iterator, so they only drop the
 * reference.
 */
static int tracing_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m = file->private_data;
	struct trace_iterator *iter;
	int cpu;

	if (!(file->f_mode & FMODE_READ)) {
		trace_array_put(tr);
		return 0;
	}

	/* Writes do not use seq_file */
	iter = m->private;
	mutex_lock(&trace_types_lock);

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	if (!iter->snapshot)
		/* reenable tracing if it was previously enabled */
		tracing_start_tr(tr);

	/* __trace_array_put: caller already holds trace_types_lock. */
	__trace_array_put(tr);

	mutex_unlock(&trace_types_lock);

	/* Free everything __tracing_open() allocated, in reverse order. */
	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
	seq_release_private(inode, file);

	return 0;
}
3125
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003126static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3127{
3128 struct trace_array *tr = inode->i_private;
3129
3130 trace_array_put(tr);
3131 return 0;
3132}
3133
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003134static int tracing_single_release_tr(struct inode *inode, struct file *file)
3135{
3136 struct trace_array *tr = inode->i_private;
3137
3138 trace_array_put(tr);
3139
3140 return single_release(inode, file);
3141}
3142
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003143static int tracing_open(struct inode *inode, struct file *file)
3144{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003145 struct trace_array *tr = inode->i_private;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003146 struct trace_iterator *iter;
3147 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003148
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003149 if (trace_array_get(tr) < 0)
3150 return -ENODEV;
3151
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003152 /* If this file was open for write, then erase contents */
Oleg Nesterov6484c712013-07-23 17:26:10 +02003153 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3154 int cpu = tracing_get_cpu(inode);
3155
3156 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003157 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003158 else
Oleg Nesterov6484c712013-07-23 17:26:10 +02003159 tracing_reset(&tr->trace_buffer, cpu);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003160 }
3161
3162 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003163 iter = __tracing_open(inode, file, false);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003164 if (IS_ERR(iter))
3165 ret = PTR_ERR(iter);
3166 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
3167 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3168 }
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003169
3170 if (ret < 0)
3171 trace_array_put(tr);
3172
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003173 return ret;
3174}
3175
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003176/*
3177 * Some tracers are not suitable for instance buffers.
3178 * A tracer is always available for the global array (toplevel)
3179 * or if it explicitly states that it is.
3180 */
3181static bool
3182trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3183{
3184 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3185}
3186
3187/* Find the next tracer that this trace array may use */
3188static struct tracer *
3189get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3190{
3191 while (t && !trace_ok_for_array(t, tr))
3192 t = t->next;
3193
3194 return t;
3195}
3196
Ingo Molnare309b412008-05-12 21:20:51 +02003197static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003198t_next(struct seq_file *m, void *v, loff_t *pos)
3199{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003200 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003201 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003202
3203 (*pos)++;
3204
3205 if (t)
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003206 t = get_tracer_for_array(tr, t->next);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003207
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003208 return t;
3209}
3210
3211static void *t_start(struct seq_file *m, loff_t *pos)
3212{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003213 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003214 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003215 loff_t l = 0;
3216
3217 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003218
3219 t = get_tracer_for_array(tr, trace_types);
3220 for (; t && l < *pos; t = t_next(m, t, &l))
3221 ;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003222
3223 return t;
3224}
3225
/* seq_file ->stop: pairs with the lock taken in t_start(). */
static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}
3230
3231static int t_show(struct seq_file *m, void *v)
3232{
3233 struct tracer *t = v;
3234
3235 if (!t)
3236 return 0;
3237
3238 seq_printf(m, "%s", t->name);
3239 if (t->next)
3240 seq_putc(m, ' ');
3241 else
3242 seq_putc(m, '\n');
3243
3244 return 0;
3245}
3246
/* seq_file callbacks for listing registered tracers ("available_tracers"). */
static const struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};
3253
3254static int show_traces_open(struct inode *inode, struct file *file)
3255{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003256 struct trace_array *tr = inode->i_private;
3257 struct seq_file *m;
3258 int ret;
3259
Steven Rostedt60a11772008-05-12 21:20:44 +02003260 if (tracing_disabled)
3261 return -ENODEV;
3262
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003263 ret = seq_open(file, &show_traces_seq_ops);
3264 if (ret)
3265 return ret;
3266
3267 m = file->private_data;
3268 m->private = tr;
3269
3270 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003271}
3272
/*
 * Write handler for the "trace" file: data written here is discarded.
 * Returning count claims the whole buffer was consumed so callers
 * don't retry.  (Erasing the buffer is done via O_TRUNC at open time.)
 */
static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}
3279
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003280loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08003281{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003282 int ret;
3283
Slava Pestov364829b2010-11-24 15:13:16 -08003284 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003285 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08003286 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003287 file->f_pos = ret = 0;
3288
3289 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08003290}
3291
/* File operations for the "trace" file: seq_file reads, stubbed writes. */
static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.write		= tracing_write_stub,
	.llseek		= tracing_lseek,
	.release	= tracing_release,
};
3299
/* File operations for "available_tracers". */
static const struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
	.llseek		= seq_lseek,
};
3306
/*
 * The tracer itself will not take this lock, but still we want
 * to provide a consistent cpumask to user-space:
 * (it serializes readers and writers of mask_str below).
 */
static DEFINE_MUTEX(tracing_cpumask_update_lock);

/*
 * Temporary storage for the character representation of the
 * CPU bitmask (and one more byte for the newline):
 */
static char mask_str[NR_CPUS + 1];
3318
Ingo Molnarc7078de2008-05-12 21:20:52 +02003319static ssize_t
3320tracing_cpumask_read(struct file *filp, char __user *ubuf,
3321 size_t count, loff_t *ppos)
3322{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003323 struct trace_array *tr = file_inode(filp)->i_private;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003324 int len;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003325
3326 mutex_lock(&tracing_cpumask_update_lock);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003327
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003328 len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003329 if (count - len < 2) {
3330 count = -EINVAL;
3331 goto out_err;
3332 }
3333 len += sprintf(mask_str + len, "\n");
3334 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3335
3336out_err:
Ingo Molnarc7078de2008-05-12 21:20:52 +02003337 mutex_unlock(&tracing_cpumask_update_lock);
3338
3339 return count;
3340}
3341
/*
 * Write handler for "tracing_cpumask": parse a new cpumask from user
 * space and enable/disable per-cpu recording to match it.
 *
 * The per-cpu flip is done under tr->max_lock with interrupts off so
 * the disabled counters and the ring-buffer record state change
 * atomically with respect to tracing on this CPU.
 */
static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	cpumask_var_t tracing_cpumask_new;
	int err, cpu;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	mutex_lock(&tracing_cpumask_update_lock);

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
		}
	}
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);

	mutex_unlock(&tracing_cpumask_update_lock);
	free_cpumask_var(tracing_cpumask_new);

	return count;

	/* NOTE: label name is historical — no lock is held on this path. */
err_unlock:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}
3392
/* File operations for "tracing_cpumask"; holds a trace_array reference. */
static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
	.release	= tracing_release_generic_tr,
	.llseek		= generic_file_llseek,
};
3400
Li Zefanfdb372e2009-12-08 11:15:59 +08003401static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003402{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003403 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003404 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003405 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003406 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003407
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003408 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003409 tracer_flags = tr->current_trace->flags->val;
3410 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003411
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003412 for (i = 0; trace_options[i]; i++) {
3413 if (trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08003414 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003415 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003416 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003417 }
3418
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003419 for (i = 0; trace_opts[i].name; i++) {
3420 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08003421 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003422 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003423 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003424 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003425 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003426
Li Zefanfdb372e2009-12-08 11:15:59 +08003427 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003428}
3429
/*
 * Apply (or clear) a single tracer-specific option bit.
 *
 * The current tracer is consulted first through its ->set_flag()
 * callback, which may veto the change by returning non-zero; only
 * when it approves is the bit updated in @tracer_flags->val.
 *
 * Returns 0 on success or the tracer's error code.
 */
static int __set_tracer_option(struct trace_array *tr,
			       struct tracer_flags *tracer_flags,
			       struct tracer_opt *opts, int neg)
{
	struct tracer *trace = tr->current_trace;
	int ret;

	/* !neg is the new value of the bit: set when not negated */
	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
	if (ret)
		return ret;

	if (neg)
		tracer_flags->val &= ~opts->bit;
	else
		tracer_flags->val |= opts->bit;
	return 0;
}
3447
/*
 * Try to assign a tracer specific option.
 *
 * Scans the current tracer's NULL-terminated option table for an
 * entry whose name matches @cmp and forwards to __set_tracer_option().
 * Returns -EINVAL when no tracer option by that name exists.
 */
static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
{
	struct tracer *trace = tr->current_trace;
	struct tracer_flags *tracer_flags = trace->flags;
	struct tracer_opt *opts = NULL;
	int i;

	for (i = 0; tracer_flags->opts[i].name; i++) {
		opts = &tracer_flags->opts[i];

		if (strcmp(cmp, opts->name) == 0)
			return __set_tracer_option(tr, trace->flags, opts, neg);
	}

	return -EINVAL;
}
3465
/* Some tracers require overwrite to stay enabled */
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
{
	/*
	 * Refuse an attempt to clear TRACE_ITER_OVERWRITE (!set)
	 * while this tracer is enabled; used as a ->flag_changed
	 * veto.  Returns -1 to reject, 0 to allow.
	 */
	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
		return -1;

	return 0;
}
3474
/*
 * Set or clear one global trace_flags option bit for @tr and run the
 * side effects some options require (cmdline recording, ring-buffer
 * overwrite mode, trace_printk comm tracking).
 *
 * The in-file caller (trace_set_options) holds trace_types_lock
 * across this call.
 *
 * Returns 0 on success, -EINVAL if the current tracer vetoes the
 * change via its ->flag_changed() callback.
 */
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
	/* do nothing if flag is already set */
	if (!!(trace_flags & mask) == !!enabled)
		return 0;

	/* Give the tracer a chance to approve the change */
	if (tr->current_trace->flag_changed)
		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
			return -EINVAL;

	if (enabled)
		trace_flags |= mask;
	else
		trace_flags &= ~mask;

	if (mask == TRACE_ITER_RECORD_CMD)
		trace_event_enable_cmd_record(enabled);

	if (mask == TRACE_ITER_OVERWRITE) {
		/* Keep the ring buffer's overwrite mode in sync */
		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
#ifdef CONFIG_TRACER_MAX_TRACE
		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
#endif
	}

	if (mask == TRACE_ITER_PRINTK)
		trace_printk_start_stop_comm(enabled);

	return 0;
}
3506
/*
 * Parse one option string ("opt" or "noopt") and apply it: first try
 * the global trace_options[] names, then fall back to the current
 * tracer's private options.  Takes trace_types_lock for the update.
 *
 * Returns 0 on success, a negative errno on failure (-ENODEV is kept
 * only if neither table ever matched).
 */
static int trace_set_options(struct trace_array *tr, char *option)
{
	char *cmp;
	int neg = 0;
	int ret = -ENODEV;
	int i;

	cmp = strstrip(option);

	/* A "no" prefix negates the option */
	if (strncmp(cmp, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	mutex_lock(&trace_types_lock);

	for (i = 0; trace_options[i]; i++) {
		if (strcmp(cmp, trace_options[i]) == 0) {
			ret = set_tracer_flag(tr, 1 << i, !neg);
			break;
		}
	}

	/* If no option could be set, test the specific tracer options */
	if (!trace_options[i])
		ret = set_tracer_option(tr, cmp, neg);

	mutex_unlock(&trace_types_lock);

	return ret;
}
3538
/*
 * Write handler for the "trace_options" file: copy the user string
 * into a bounded kernel buffer, NUL-terminate it, and hand it to
 * trace_set_options().  Returns the byte count consumed or a
 * negative errno.
 */
static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	int ret;

	/* leave room for the terminating NUL */
	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = trace_set_options(tr, buf);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}
3564
/*
 * Open handler for "trace_options": pins the trace_array with
 * trace_array_get() for the lifetime of the open file, then sets up
 * the single_open() seq interface.  The reference is dropped on
 * open failure here and otherwise by tracing_single_release_tr().
 */
static int tracing_trace_options_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	ret = single_open(file, tracing_trace_options_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
3582
/* File operations for the "trace_options" tracefs file. */
static const struct file_operations tracing_iter_fops = {
	.open		= tracing_trace_options_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_trace_options_write,
};
3590
/*
 * Static help text served by the "README" tracefs file.  Sections are
 * compiled in/out to match the kernel configuration (dynamic ftrace,
 * stack tracer, snapshot, function graph, ...).
 */
static const char readme_msg[] =
	"tracing mini-HOWTO:\n\n"
	"# echo 0 > tracing_on : quick way to disable tracing\n"
	"# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
	" Important files:\n"
	" trace\t\t\t- The static contents of the buffer\n"
	"\t\t\t To clear the buffer write into this file: echo > trace\n"
	" trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
	" current_tracer\t- function and latency tracers\n"
	" available_tracers\t- list of configured tracers for current_tracer\n"
	" buffer_size_kb\t- view and modify size of per cpu buffer\n"
	" buffer_total_size_kb - view total size of all cpu buffers\n\n"
	" trace_clock\t\t-change the clock used to order events\n"
	" local: Per cpu clock but may not be synced across CPUs\n"
	" global: Synced across CPUs but slows tracing down.\n"
	" counter: Not a clock, but just an increment\n"
	" uptime: Jiffy counter from time of boot\n"
	" perf: Same clock that perf events use\n"
#ifdef CONFIG_X86_64
	" x86-tsc: TSC cycle counter\n"
#endif
	"\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
	" tracing_cpumask\t- Limit which CPUs to trace\n"
	" instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
	"\t\t\t Remove sub-buffer with rmdir\n"
	" trace_options\t\t- Set format or modify how tracing happens\n"
	"\t\t\t Disable an option by adding a suffix 'no' to the\n"
	"\t\t\t option name\n"
	" saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	"\n available_filter_functions - list of functions that can be filtered on\n"
	" set_ftrace_filter\t- echo function name in here to only trace these\n"
	"\t\t\t functions\n"
	"\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
	"\t modules: Can select a group via module\n"
	"\t Format: :mod:<module-name>\n"
	"\t example: echo :mod:ext3 > set_ftrace_filter\n"
	"\t triggers: a command to perform when function is hit\n"
	"\t Format: <function>:<trigger>[:count]\n"
	"\t trigger: traceon, traceoff\n"
	"\t\t enable_event:<system>:<event>\n"
	"\t\t disable_event:<system>:<event>\n"
#ifdef CONFIG_STACKTRACE
	"\t\t stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t snapshot\n"
#endif
	"\t\t dump\n"
	"\t\t cpudump\n"
	"\t example: echo do_fault:traceoff > set_ftrace_filter\n"
	"\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
	"\t The first one will disable tracing every time do_fault is hit\n"
	"\t The second will disable tracing at most 3 times when do_trap is hit\n"
	"\t The first time do trap is hit and it disables tracing, the\n"
	"\t counter will decrement to 2. If tracing is already disabled,\n"
	"\t the counter will not decrement. It only decrements when the\n"
	"\t trigger did work\n"
	"\t To remove trigger without count:\n"
	"\t echo '!<function>:<trigger> > set_ftrace_filter\n"
	"\t To remove trigger with a count:\n"
	"\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
	" set_ftrace_notrace\t- echo function name in here to never trace.\n"
	"\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
	"\t modules: Can select a group via module command :mod:\n"
	"\t Does not accept triggers\n"
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_TRACER
	" set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
	"\t\t (function)\n"
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	" set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
	" set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
	" max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
	"\t\t\t snapshot buffer. Read the contents for more\n"
	"\t\t\t information\n"
#endif
#ifdef CONFIG_STACK_TRACER
	" stack_trace\t\t- Shows the max stack trace when active\n"
	" stack_max_size\t- Shows current max stack size that was traced\n"
	"\t\t\t Write into this file to reset the max size (trigger a\n"
	"\t\t\t new trace)\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	" stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
	"\t\t\t traces\n"
#endif
#endif /* CONFIG_STACK_TRACER */
	" events/\t\t- Directory containing all trace event subsystems:\n"
	" enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
	" events/<system>/\t- Directory containing all trace events for <system>:\n"
	" enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
	"\t\t\t events\n"
	" filter\t\t- If set, only events passing filter are traced\n"
	" events/<system>/<event>/\t- Directory containing control files for\n"
	"\t\t\t <event>:\n"
	" enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
	" filter\t\t- If set, only events passing filter are traced\n"
	" trigger\t\t- If set, a command to perform when event is hit\n"
	"\t Format: <trigger>[:count][if <filter>]\n"
	"\t trigger: traceon, traceoff\n"
	"\t enable_event:<system>:<event>\n"
	"\t disable_event:<system>:<event>\n"
#ifdef CONFIG_STACKTRACE
	"\t\t stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t snapshot\n"
#endif
	"\t example: echo traceoff > events/block/block_unplug/trigger\n"
	"\t echo traceoff:3 > events/block/block_unplug/trigger\n"
	"\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
	"\t events/block/block_unplug/trigger\n"
	"\t The first disables tracing every time block_unplug is hit.\n"
	"\t The second disables tracing the first 3 times block_unplug is hit.\n"
	"\t The third enables the kmalloc event the first 3 times block_unplug\n"
	"\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
	"\t Like function triggers, the counter is only decremented if it\n"
	"\t enabled or disabled tracing.\n"
	"\t To remove a trigger without a count:\n"
	"\t echo '!<trigger> > <system>/<event>/trigger\n"
	"\t To remove a trigger with a count:\n"
	"\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
	"\t Filters can be ignored when removing a trigger.\n"
;
3719
3720static ssize_t
3721tracing_readme_read(struct file *filp, char __user *ubuf,
3722 size_t cnt, loff_t *ppos)
3723{
3724 return simple_read_from_buffer(ubuf, cnt, ppos,
3725 readme_msg, strlen(readme_msg));
3726}
3727
/* File operations for the read-only "README" tracefs file. */
static const struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
	.llseek		= generic_file_llseek,
};
3733
/*
 * seq_file ->next() for "saved_cmdlines": advance within the
 * map_cmdline_to_pid[] array to the next slot that actually holds a
 * recorded pid, skipping empty (-1 / NO_CMDLINE_MAP) entries.
 * Returns a pointer into the array, or NULL at the end.
 */
static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned int *ptr = v;

	/*
	 * Only step past the current entry on a genuine "next"; the
	 * first call from saved_cmdlines_start() must examine slot 0.
	 */
	if (*pos || m->count)
		ptr++;

	(*pos)++;

	for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
	     ptr++) {
		if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
			continue;

		return ptr;
	}

	return NULL;
}
Avadh Patel69abe6a2009-04-10 16:04:48 -04003753
/*
 * seq_file ->start() for "saved_cmdlines": take trace_cmdline_lock
 * (with preemption disabled, as required for an arch_spin_lock) and
 * walk forward to the entry at *pos.  The lock is held until
 * saved_cmdlines_stop() runs, even when NULL is returned here.
 */
static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
{
	void *v;
	loff_t l = 0;

	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	v = &savedcmd->map_cmdline_to_pid[0];
	while (l <= *pos) {
		v = saved_cmdlines_next(m, v, &l);
		if (!v)
			return NULL;
	}

	return v;
}
3771
/*
 * seq_file ->stop(): release the lock taken in saved_cmdlines_start()
 * and re-enable preemption, in that order.
 */
static void saved_cmdlines_stop(struct seq_file *m, void *v)
{
	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}
3777
3778static int saved_cmdlines_show(struct seq_file *m, void *v)
3779{
3780 char buf[TASK_COMM_LEN];
3781 unsigned int *pid = v;
3782
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003783 __trace_find_cmdline(*pid, buf);
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003784 seq_printf(m, "%d %s\n", *pid, buf);
3785 return 0;
3786}
3787
/* seq_file iterator over the saved pid->comm mapping. */
static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
	.start		= saved_cmdlines_start,
	.next		= saved_cmdlines_next,
	.stop		= saved_cmdlines_stop,
	.show		= saved_cmdlines_show,
};
3794
/*
 * Open handler for "saved_cmdlines": hook up the seq_file iterator
 * unless tracing has been hard-disabled.
 */
static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
}
3802
/* File operations for the read-only "saved_cmdlines" tracefs file. */
static const struct file_operations tracing_saved_cmdlines_fops = {
	.open		= tracing_saved_cmdlines_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
3809
3810static ssize_t
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003811tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3812 size_t cnt, loff_t *ppos)
3813{
3814 char buf[64];
3815 int r;
3816
3817 arch_spin_lock(&trace_cmdline_lock);
Namhyung Kima6af8fb2014-06-10 16:11:35 +09003818 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003819 arch_spin_unlock(&trace_cmdline_lock);
3820
3821 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3822}
3823
/* Free a saved_cmdlines_buffer and both of its backing arrays. */
static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
{
	kfree(s->saved_cmdlines);
	kfree(s->map_cmdline_to_pid);
	kfree(s);
}
3830
3831static int tracing_resize_saved_cmdlines(unsigned int val)
3832{
3833 struct saved_cmdlines_buffer *s, *savedcmd_temp;
3834
Namhyung Kima6af8fb2014-06-10 16:11:35 +09003835 s = kmalloc(sizeof(*s), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003836 if (!s)
3837 return -ENOMEM;
3838
3839 if (allocate_cmdlines_buffer(val, s) < 0) {
3840 kfree(s);
3841 return -ENOMEM;
3842 }
3843
3844 arch_spin_lock(&trace_cmdline_lock);
3845 savedcmd_temp = savedcmd;
3846 savedcmd = s;
3847 arch_spin_unlock(&trace_cmdline_lock);
3848 free_saved_cmdlines_buffer(savedcmd_temp);
3849
3850 return 0;
3851}
3852
/*
 * Write handler for "saved_cmdlines_size": parse a decimal count from
 * user space, validate its range, and resize the saved-cmdlines
 * buffer accordingly.  Returns bytes consumed or a negative errno.
 */
static ssize_t
tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
				  size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry or less than PID_MAX_DEFAULT */
	if (!val || val > PID_MAX_DEFAULT)
		return -EINVAL;

	ret = tracing_resize_saved_cmdlines((unsigned int)val);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}
3876
/* File operations for the read/write "saved_cmdlines_size" file. */
static const struct file_operations tracing_saved_cmdlines_size_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_saved_cmdlines_size_read,
	.write		= tracing_saved_cmdlines_size_write,
};
3882
/*
 * Read handler for "current_tracer": report the name of the tracer
 * currently attached to this trace array, newline terminated.
 * trace_types_lock guards current_trace against concurrent switches.
 */
static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+2];
	int r;

	mutex_lock(&trace_types_lock);
	r = sprintf(buf, "%s\n", tr->current_trace->name);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
3897
/*
 * Initialize tracer @t on trace array @tr: clear the per-cpu ring
 * buffers first, then run the tracer's own ->init() callback.
 */
int tracer_init(struct tracer *t, struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
	return t->init(tr);
}
3903
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003904static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003905{
3906 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05003907
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003908 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003909 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003910}
3911
#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Resize @trace_buf's ring buffer(s) to match the recorded entry
 * counts of @size_buf — either for a single cpu or, with
 * RING_BUFFER_ALL_CPUS, for every traced cpu.  The per-cpu "entries"
 * bookkeeping of @trace_buf is updated only for cpus whose resize
 * succeeded.  Returns 0 or the first ring_buffer_resize() error.
 */
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id)
{
	int cpu, ret = 0;

	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
			if (ret < 0)
				break;
			per_cpu_ptr(trace_buf->data, cpu)->entries =
				per_cpu_ptr(size_buf->data, cpu)->entries;
		}
	} else {
		ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
		if (ret == 0)
			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
				per_cpu_ptr(size_buf->data, cpu_id)->entries;
	}

	return ret;
}
#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09003939
/*
 * Resize the trace ring buffer(s) of @tr to @size for @cpu (or all
 * cpus with RING_BUFFER_ALL_CPUS).  With CONFIG_TRACER_MAX_TRACE,
 * the snapshot ("max") buffer is kept the same size as the main
 * buffer; if the max-buffer resize fails the main buffer is rolled
 * back to its previous per-cpu sizes.  Returns 0 or a negative errno.
 */
static int __tracing_resize_ring_buffer(struct trace_array *tr,
					unsigned long size, int cpu)
{
	int ret;

	/*
	 * If kernel or user changes the size of the ring buffer
	 * we use the size that was given, and we can forget about
	 * expanding it later.
	 */
	ring_buffer_expanded = true;

	/* May be called before buffers are initialized */
	if (!tr->trace_buffer.buffer)
		return 0;

	ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
	if (ret < 0)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Only the global array with a max_tr-using tracer mirrors sizes */
	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
	    !tr->current_trace->use_max_tr)
		goto out;

	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
	if (ret < 0) {
		/* Roll the main buffer back to its recorded sizes */
		int r = resize_buffer_duplicate_size(&tr->trace_buffer,
						     &tr->trace_buffer, cpu);
		if (r < 0) {
			/*
			 * AARGH! We are left with different
			 * size max buffer!!!!
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the
			 * latency tracers), it swaps the max buffer
			 * with the saved snap shot. We succeeded to
			 * update the size of the main buffer, but failed to
			 * update the size of the max buffer. But when we tried
			 * to reset the main buffer to the original size, we
			 * failed there too. This is very unlikely to
			 * happen, but if it does, warn and kill all
			 * tracing.
			 */
			WARN_ON(1);
			tracing_disabled = 1;
		}
		return ret;
	}

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->max_buffer, size);
	else
		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;

 out:
#endif /* CONFIG_TRACER_MAX_TRACE */

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->trace_buffer, size);
	else
		per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;

	return ret;
}
4005
/*
 * Locked front-end for __tracing_resize_ring_buffer(): validates the
 * cpu id against the tracing cpumask and serializes with other buffer
 * operations via trace_types_lock.  Returns the new size on success
 * or a negative errno (any resize failure is reported as -ENOMEM).
 */
static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
					  unsigned long size, int cpu_id)
{
	int ret = size;

	mutex_lock(&trace_types_lock);

	if (cpu_id != RING_BUFFER_ALL_CPUS) {
		/* make sure, this cpu is enabled in the mask */
		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
			ret = -EINVAL;
			goto out;
		}
	}

	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
	if (ret < 0)
		ret = -ENOMEM;

out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
4030
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004031
/**
 * tracing_update_buffers - used by tracing facility to expand ring buffers
 *
 * To save on memory when the tracing is never used on a system with it
 * configured in. The ring buffers are set to a minimum size. But once
 * a user starts to use the tracing facility, then they need to grow
 * to their default size.
 *
 * This function is to be called when a tracer is about to be used.
 *
 * Returns 0 on success (or if the buffers were already expanded),
 * negative errno on resize failure.
 */
int tracing_update_buffers(void)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded)
		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
	mutex_unlock(&trace_types_lock);

	return ret;
}
4054
Steven Rostedt577b7852009-02-26 23:43:05 -05004055struct trace_option_dentry;
4056
4057static struct trace_option_dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004058create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05004059
4060static void
4061destroy_trace_option_files(struct trace_option_dentry *topts);
4062
/*
 * Used to clear out the tracer before deletion of an instance.
 * Must have trace_types_lock held.
 */
static void tracing_set_nop(struct trace_array *tr)
{
	/* Already running the no-op tracer: nothing to tear down */
	if (tr->current_trace == &nop_trace)
		return;

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	tr->current_trace = &nop_trace;
}
4079
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004080static int tracing_set_tracer(struct trace_array *tr, const char *buf)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004081{
Steven Rostedt577b7852009-02-26 23:43:05 -05004082 static struct trace_option_dentry *topts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004083 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004084#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05004085 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004086#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01004087 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004088
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004089 mutex_lock(&trace_types_lock);
4090
Steven Rostedt73c51622009-03-11 13:42:01 -04004091 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004092 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004093 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04004094 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01004095 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04004096 ret = 0;
4097 }
4098
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004099 for (t = trace_types; t; t = t->next) {
4100 if (strcmp(t->name, buf) == 0)
4101 break;
4102 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004103 if (!t) {
4104 ret = -EINVAL;
4105 goto out;
4106 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004107 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004108 goto out;
4109
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004110 /* Some tracers are only allowed for the top level buffer */
4111 if (!trace_ok_for_array(t, tr)) {
4112 ret = -EINVAL;
4113 goto out;
4114 }
4115
Steven Rostedt9f029e82008-11-12 15:24:24 -05004116 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004117
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004118 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004119
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004120 if (tr->current_trace->reset)
4121 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05004122
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004123 /* Current trace needs to be nop_trace before synchronize_sched */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004124 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05004125
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05004126#ifdef CONFIG_TRACER_MAX_TRACE
4127 had_max_tr = tr->allocated_snapshot;
Steven Rostedt34600f02013-01-22 13:35:11 -05004128
4129 if (had_max_tr && !t->use_max_tr) {
4130 /*
4131 * We need to make sure that the update_max_tr sees that
4132 * current_trace changed to nop_trace to keep it from
4133 * swapping the buffers after we resize it.
4134 * The update_max_tr is called from interrupts disabled
4135 * so a synchronized_sched() is sufficient.
4136 */
4137 synchronize_sched();
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004138 free_snapshot(tr);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004139 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004140#endif
Steven Rostedt (Red Hat)f1b21c92014-01-14 12:33:33 -05004141 /* Currently, only the top instance has options */
4142 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
4143 destroy_trace_option_files(topts);
4144 topts = create_trace_option_files(tr, t);
4145 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004146
4147#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05004148 if (t->use_max_tr && !had_max_tr) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004149 ret = alloc_snapshot(tr);
Hiraku Toyookad60da502012-10-17 11:56:16 +09004150 if (ret < 0)
4151 goto out;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004152 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004153#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05004154
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004155 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004156 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004157 if (ret)
4158 goto out;
4159 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004160
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004161 tr->current_trace = t;
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004162 tr->current_trace->enabled++;
Steven Rostedt9f029e82008-11-12 15:24:24 -05004163 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004164 out:
4165 mutex_unlock(&trace_types_lock);
4166
Peter Zijlstrad9e54072008-11-01 19:57:37 +01004167 return ret;
4168}
4169
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004170static ssize_t
4171tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4172 size_t cnt, loff_t *ppos)
4173{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004174 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08004175 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004176 int i;
4177 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004178 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004179
Steven Rostedt60063a62008-10-28 10:44:24 -04004180 ret = cnt;
4181
Li Zefanee6c2c12009-09-18 14:06:47 +08004182 if (cnt > MAX_TRACER_SIZE)
4183 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004184
4185 if (copy_from_user(&buf, ubuf, cnt))
4186 return -EFAULT;
4187
4188 buf[cnt] = 0;
4189
4190 /* strip ending whitespace. */
4191 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4192 buf[i] = 0;
4193
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004194 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004195 if (err)
4196 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004197
Jiri Olsacf8517c2009-10-23 19:36:16 -04004198 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004199
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004200 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004201}
4202
4203static ssize_t
4204tracing_max_lat_read(struct file *filp, char __user *ubuf,
4205 size_t cnt, loff_t *ppos)
4206{
4207 unsigned long *ptr = filp->private_data;
4208 char buf[64];
4209 int r;
4210
Steven Rostedtcffae432008-05-12 21:21:00 +02004211 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004212 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02004213 if (r > sizeof(buf))
4214 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004215 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004216}
4217
4218static ssize_t
4219tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4220 size_t cnt, loff_t *ppos)
4221{
Hannes Eder5e398412009-02-10 19:44:34 +01004222 unsigned long *ptr = filp->private_data;
Hannes Eder5e398412009-02-10 19:44:34 +01004223 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004224 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004225
Peter Huewe22fe9b52011-06-07 21:58:27 +02004226 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4227 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004228 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004229
4230 *ptr = val * 1000;
4231
4232 return cnt;
4233}
4234
Steven Rostedtb3806b42008-05-12 21:20:46 +02004235static int tracing_open_pipe(struct inode *inode, struct file *filp)
4236{
Oleg Nesterov15544202013-07-23 17:25:57 +02004237 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004238 struct trace_iterator *iter;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004239 int ret = 0;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004240
4241 if (tracing_disabled)
4242 return -ENODEV;
4243
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004244 if (trace_array_get(tr) < 0)
4245 return -ENODEV;
4246
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004247 mutex_lock(&trace_types_lock);
4248
Steven Rostedtb3806b42008-05-12 21:20:46 +02004249 /* create a buffer to store the information to pass to userspace */
4250 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004251 if (!iter) {
4252 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004253 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004254 goto out;
4255 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004256
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004257 /*
4258 * We make a copy of the current tracer to avoid concurrent
4259 * changes on it while we are reading.
4260 */
4261 iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
4262 if (!iter->trace) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004263 ret = -ENOMEM;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004264 goto fail;
4265 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004266 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004267
4268 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4269 ret = -ENOMEM;
4270 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10304271 }
4272
Steven Rostedta3097202008-11-07 22:36:02 -05004273 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10304274 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05004275
Steven Rostedt112f38a72009-06-01 15:16:05 -04004276 if (trace_flags & TRACE_ITER_LATENCY_FMT)
4277 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4278
David Sharp8be07092012-11-13 12:18:22 -08004279 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09004280 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08004281 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4282
Oleg Nesterov15544202013-07-23 17:25:57 +02004283 iter->tr = tr;
4284 iter->trace_buffer = &tr->trace_buffer;
4285 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004286 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004287 filp->private_data = iter;
4288
Steven Rostedt107bad82008-05-12 21:21:01 +02004289 if (iter->trace->pipe_open)
4290 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02004291
Arnd Bergmannb4447862010-07-07 23:40:11 +02004292 nonseekable_open(inode, filp);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004293out:
4294 mutex_unlock(&trace_types_lock);
4295 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004296
4297fail:
4298 kfree(iter->trace);
4299 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004300 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004301 mutex_unlock(&trace_types_lock);
4302 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004303}
4304
/*
 * Release for trace_pipe: undo everything tracing_open_pipe() set up.
 */
static int tracing_release_pipe(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter = file->private_data;
	struct trace_array *tr = inode->i_private;

	/* Let the tracer run its own pipe teardown under trace_types_lock. */
	mutex_lock(&trace_types_lock);

	if (iter->trace->pipe_close)
		iter->trace->pipe_close(iter);

	mutex_unlock(&trace_types_lock);

	/* Free the per-reader state allocated at open time. */
	free_cpumask_var(iter->started);
	mutex_destroy(&iter->mutex);
	kfree(iter->trace);
	kfree(iter);

	/* Drop the reference taken by tracing_open_pipe(). */
	trace_array_put(tr);

	return 0;
}
4326
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004327static unsigned int
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004328trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004329{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004330 /* Iterators are static, they should be filled or empty */
4331 if (trace_buffer_iter(iter, iter->cpu_file))
4332 return POLLIN | POLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004333
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004334 if (trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004335 /*
4336 * Always select as readable when in blocking mode
4337 */
4338 return POLLIN | POLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004339 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004340 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004341 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004342}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004343
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004344static unsigned int
4345tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4346{
4347 struct trace_iterator *iter = filp->private_data;
4348
4349 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004350}
4351
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004352/* Must be called with trace_types_lock mutex held. */
4353static int tracing_wait_pipe(struct file *filp)
4354{
4355 struct trace_iterator *iter = filp->private_data;
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04004356 int ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004357
4358 while (trace_empty(iter)) {
4359
4360 if ((filp->f_flags & O_NONBLOCK)) {
4361 return -EAGAIN;
4362 }
4363
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004364 /*
Liu Bo250bfd32013-01-14 10:54:11 +08004365 * We block until we read something and tracing is disabled.
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004366 * We still block if tracing is disabled, but we have never
4367 * read anything. This allows a user to cat this file, and
4368 * then enable tracing. But after we have read something,
4369 * we give an EOF when tracing is again disabled.
4370 *
4371 * iter->pos will be 0 if we haven't read anything.
4372 */
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04004373 if (!tracing_is_on() && iter->pos)
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004374 break;
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04004375
4376 mutex_unlock(&iter->mutex);
4377
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04004378 ret = wait_on_pipe(iter);
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04004379
4380 mutex_lock(&iter->mutex);
4381
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04004382 if (ret)
4383 return ret;
4384
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04004385 if (signal_pending(current))
4386 return -EINTR;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004387 }
4388
4389 return 1;
4390}
4391
Steven Rostedtb3806b42008-05-12 21:20:46 +02004392/*
4393 * Consumer reader.
4394 */
4395static ssize_t
4396tracing_read_pipe(struct file *filp, char __user *ubuf,
4397 size_t cnt, loff_t *ppos)
4398{
4399 struct trace_iterator *iter = filp->private_data;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004400 struct trace_array *tr = iter->tr;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004401 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004402
4403 /* return any leftover data */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004404 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4405 if (sret != -EBUSY)
4406 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004407
Steven Rostedtf9520752009-03-02 14:04:40 -05004408 trace_seq_init(&iter->seq);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004409
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004410 /* copy the tracer to avoid using a global lock all around */
Steven Rostedt107bad82008-05-12 21:21:01 +02004411 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004412 if (unlikely(iter->trace->name != tr->current_trace->name))
4413 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004414 mutex_unlock(&trace_types_lock);
4415
4416 /*
4417 * Avoid more than one consumer on a single file descriptor
4418 * This is just a matter of traces coherency, the ring buffer itself
4419 * is protected.
4420 */
4421 mutex_lock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02004422 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004423 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4424 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02004425 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02004426 }
4427
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004428waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004429 sret = tracing_wait_pipe(filp);
4430 if (sret <= 0)
4431 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004432
4433 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004434 if (trace_empty(iter)) {
4435 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02004436 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004437 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004438
4439 if (cnt >= PAGE_SIZE)
4440 cnt = PAGE_SIZE - 1;
4441
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004442 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004443 memset(&iter->seq, 0,
4444 sizeof(struct trace_iterator) -
4445 offsetof(struct trace_iterator, seq));
Andrew Vagined5467d2013-08-02 21:16:43 +04004446 cpumask_clear(iter->started);
Steven Rostedt4823ed72008-05-12 21:21:01 +02004447 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004448
Lai Jiangshan4f535962009-05-18 19:35:34 +08004449 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004450 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05004451 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004452 enum print_line_t ret;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004453 int len = iter->seq.len;
4454
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004455 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004456 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02004457 /* don't print partial lines */
4458 iter->seq.len = len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004459 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004460 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01004461 if (ret != TRACE_TYPE_NO_CONSUME)
4462 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004463
4464 if (iter->seq.len >= cnt)
4465 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01004466
4467 /*
4468 * Setting the full flag means we reached the trace_seq buffer
4469 * size and we should leave by partial output condition above.
4470 * One of the trace_seq_* functions is not used properly.
4471 */
4472 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4473 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004474 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004475 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004476 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02004477
Steven Rostedtb3806b42008-05-12 21:20:46 +02004478 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004479 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4480 if (iter->seq.readpos >= iter->seq.len)
Steven Rostedtf9520752009-03-02 14:04:40 -05004481 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004482
4483 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004484 * If there was nothing to send to user, in spite of consuming trace
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004485 * entries, go back to wait for more entries.
4486 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004487 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004488 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004489
Steven Rostedt107bad82008-05-12 21:21:01 +02004490out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004491 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02004492
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004493 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004494}
4495
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004496static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4497 unsigned int idx)
4498{
4499 __free_page(spd->pages[idx]);
4500}
4501
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08004502static const struct pipe_buf_operations tracing_pipe_buf_ops = {
Steven Rostedt34cd4992009-02-09 12:06:29 -05004503 .can_merge = 0,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004504 .confirm = generic_pipe_buf_confirm,
Al Viro92fdd982014-01-17 07:53:39 -05004505 .release = generic_pipe_buf_release,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004506 .steal = generic_pipe_buf_steal,
4507 .get = generic_pipe_buf_get,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004508};
4509
Steven Rostedt34cd4992009-02-09 12:06:29 -05004510static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01004511tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004512{
4513 size_t count;
4514 int ret;
4515
4516 /* Seq buffer is page-sized, exactly what we need. */
4517 for (;;) {
4518 count = iter->seq.len;
4519 ret = print_trace_line(iter);
4520 count = iter->seq.len - count;
4521 if (rem < count) {
4522 rem = 0;
4523 iter->seq.len -= count;
4524 break;
4525 }
4526 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4527 iter->seq.len -= count;
4528 break;
4529 }
4530
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08004531 if (ret != TRACE_TYPE_NO_CONSUME)
4532 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05004533 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05004534 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05004535 rem = 0;
4536 iter->ent = NULL;
4537 break;
4538 }
4539 }
4540
4541 return rem;
4542}
4543
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004544static ssize_t tracing_splice_read_pipe(struct file *filp,
4545 loff_t *ppos,
4546 struct pipe_inode_info *pipe,
4547 size_t len,
4548 unsigned int flags)
4549{
Jens Axboe35f3d142010-05-20 10:43:18 +02004550 struct page *pages_def[PIPE_DEF_BUFFERS];
4551 struct partial_page partial_def[PIPE_DEF_BUFFERS];
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004552 struct trace_iterator *iter = filp->private_data;
4553 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02004554 .pages = pages_def,
4555 .partial = partial_def,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004556 .nr_pages = 0, /* This gets updated below. */
Eric Dumazet047fe362012-06-12 15:24:40 +02004557 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004558 .flags = flags,
4559 .ops = &tracing_pipe_buf_ops,
4560 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004561 };
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004562 struct trace_array *tr = iter->tr;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004563 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004564 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004565 unsigned int i;
4566
Jens Axboe35f3d142010-05-20 10:43:18 +02004567 if (splice_grow_spd(pipe, &spd))
4568 return -ENOMEM;
4569
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004570 /* copy the tracer to avoid using a global lock all around */
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004571 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004572 if (unlikely(iter->trace->name != tr->current_trace->name))
4573 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004574 mutex_unlock(&trace_types_lock);
4575
4576 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004577
4578 if (iter->trace->splice_read) {
4579 ret = iter->trace->splice_read(iter, filp,
4580 ppos, pipe, len, flags);
4581 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004582 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004583 }
4584
4585 ret = tracing_wait_pipe(filp);
4586 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004587 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004588
Jason Wessel955b61e2010-08-05 09:22:23 -05004589 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004590 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004591 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004592 }
4593
Lai Jiangshan4f535962009-05-18 19:35:34 +08004594 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004595 trace_access_lock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004596
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004597 /* Fill as many pages as possible. */
Al Viroa786c062014-04-11 12:01:03 -04004598 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
Jens Axboe35f3d142010-05-20 10:43:18 +02004599 spd.pages[i] = alloc_page(GFP_KERNEL);
4600 if (!spd.pages[i])
Steven Rostedt34cd4992009-02-09 12:06:29 -05004601 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004602
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01004603 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004604
4605 /* Copy the data into the page, so we can start over. */
4606 ret = trace_seq_to_buffer(&iter->seq,
Jens Axboe35f3d142010-05-20 10:43:18 +02004607 page_address(spd.pages[i]),
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004608 iter->seq.len);
4609 if (ret < 0) {
Jens Axboe35f3d142010-05-20 10:43:18 +02004610 __free_page(spd.pages[i]);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004611 break;
4612 }
Jens Axboe35f3d142010-05-20 10:43:18 +02004613 spd.partial[i].offset = 0;
4614 spd.partial[i].len = iter->seq.len;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004615
Steven Rostedtf9520752009-03-02 14:04:40 -05004616 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004617 }
4618
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004619 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004620 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004621 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004622
4623 spd.nr_pages = i;
4624
Jens Axboe35f3d142010-05-20 10:43:18 +02004625 ret = splice_to_pipe(pipe, &spd);
4626out:
Eric Dumazet047fe362012-06-12 15:24:40 +02004627 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02004628 return ret;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004629
Steven Rostedt34cd4992009-02-09 12:06:29 -05004630out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004631 mutex_unlock(&iter->mutex);
Jens Axboe35f3d142010-05-20 10:43:18 +02004632 goto out;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004633}
4634
Steven Rostedta98a3c32008-05-12 21:20:59 +02004635static ssize_t
4636tracing_entries_read(struct file *filp, char __user *ubuf,
4637 size_t cnt, loff_t *ppos)
4638{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004639 struct inode *inode = file_inode(filp);
4640 struct trace_array *tr = inode->i_private;
4641 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004642 char buf[64];
4643 int r = 0;
4644 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004645
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004646 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004647
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004648 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004649 int cpu, buf_size_same;
4650 unsigned long size;
4651
4652 size = 0;
4653 buf_size_same = 1;
4654 /* check if all cpu sizes are same */
4655 for_each_tracing_cpu(cpu) {
4656 /* fill in the size from first enabled cpu */
4657 if (size == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004658 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4659 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004660 buf_size_same = 0;
4661 break;
4662 }
4663 }
4664
4665 if (buf_size_same) {
4666 if (!ring_buffer_expanded)
4667 r = sprintf(buf, "%lu (expanded: %lu)\n",
4668 size >> 10,
4669 trace_buf_size >> 10);
4670 else
4671 r = sprintf(buf, "%lu\n", size >> 10);
4672 } else
4673 r = sprintf(buf, "X\n");
4674 } else
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004675 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004676
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004677 mutex_unlock(&trace_types_lock);
4678
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004679 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4680 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004681}
4682
4683static ssize_t
4684tracing_entries_write(struct file *filp, const char __user *ubuf,
4685 size_t cnt, loff_t *ppos)
4686{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004687 struct inode *inode = file_inode(filp);
4688 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004689 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004690 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004691
Peter Huewe22fe9b52011-06-07 21:58:27 +02004692 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4693 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004694 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004695
4696 /* must have at least 1 entry */
4697 if (!val)
4698 return -EINVAL;
4699
Steven Rostedt1696b2b2008-11-13 00:09:35 -05004700 /* value is in KB */
4701 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004702 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004703 if (ret < 0)
4704 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004705
Jiri Olsacf8517c2009-10-23 19:36:16 -04004706 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004707
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004708 return cnt;
4709}
Steven Rostedtbf5e6512008-11-10 21:46:00 -05004710
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004711static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004712tracing_total_entries_read(struct file *filp, char __user *ubuf,
4713 size_t cnt, loff_t *ppos)
4714{
4715 struct trace_array *tr = filp->private_data;
4716 char buf[64];
4717 int r, cpu;
4718 unsigned long size = 0, expanded_size = 0;
4719
4720 mutex_lock(&trace_types_lock);
4721 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004722 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004723 if (!ring_buffer_expanded)
4724 expanded_size += trace_buf_size >> 10;
4725 }
4726 if (ring_buffer_expanded)
4727 r = sprintf(buf, "%lu\n", size);
4728 else
4729 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
4730 mutex_unlock(&trace_types_lock);
4731
4732 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4733}
4734
4735static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004736tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
4737 size_t cnt, loff_t *ppos)
4738{
4739 /*
4740 * There is no need to read what the user has written, this function
4741 * is just to make sure that there is no error when "echo" is used
4742 */
4743
4744 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004745
4746 return cnt;
4747}
4748
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004749static int
4750tracing_free_buffer_release(struct inode *inode, struct file *filp)
4751{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004752 struct trace_array *tr = inode->i_private;
4753
Steven Rostedtcf30cf62011-06-14 22:44:07 -04004754 /* disable tracing ? */
4755 if (trace_flags & TRACE_ITER_STOP_ON_FREE)
Alexander Z Lam711e1242013-08-02 18:36:15 -07004756 tracer_tracing_off(tr);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004757 /* resize the ring buffer to 0 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004758 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004759
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004760 trace_array_put(tr);
4761
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004762 return 0;
4763}
4764
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004765static ssize_t
4766tracing_mark_write(struct file *filp, const char __user *ubuf,
4767 size_t cnt, loff_t *fpos)
4768{
Steven Rostedtd696b582011-09-22 11:50:27 -04004769 unsigned long addr = (unsigned long)ubuf;
Alexander Z Lam2d716192013-07-01 15:31:24 -07004770 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04004771 struct ring_buffer_event *event;
4772 struct ring_buffer *buffer;
4773 struct print_entry *entry;
4774 unsigned long irq_flags;
4775 struct page *pages[2];
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004776 void *map_page[2];
Steven Rostedtd696b582011-09-22 11:50:27 -04004777 int nr_pages = 1;
4778 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04004779 int offset;
4780 int size;
4781 int len;
4782 int ret;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004783 int i;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004784
Steven Rostedtc76f0692008-11-07 22:36:02 -05004785 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004786 return -EINVAL;
4787
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07004788 if (!(trace_flags & TRACE_ITER_MARKERS))
4789 return -EINVAL;
4790
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004791 if (cnt > TRACE_BUF_SIZE)
4792 cnt = TRACE_BUF_SIZE;
4793
Steven Rostedtd696b582011-09-22 11:50:27 -04004794 /*
4795 * Userspace is injecting traces into the kernel trace buffer.
4796 * We want to be as non intrusive as possible.
4797 * To do so, we do not want to allocate any special buffers
4798 * or take any locks, but instead write the userspace data
4799 * straight into the ring buffer.
4800 *
4801 * First we need to pin the userspace buffer into memory,
4802 * which, most likely it is, because it just referenced it.
4803 * But there's no guarantee that it is. By using get_user_pages_fast()
4804 * and kmap_atomic/kunmap_atomic() we can get access to the
4805 * pages directly. We then write the data directly into the
4806 * ring buffer.
4807 */
4808 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004809
Steven Rostedtd696b582011-09-22 11:50:27 -04004810 /* check if we cross pages */
4811 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
4812 nr_pages = 2;
4813
4814 offset = addr & (PAGE_SIZE - 1);
4815 addr &= PAGE_MASK;
4816
4817 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
4818 if (ret < nr_pages) {
4819 while (--ret >= 0)
4820 put_page(pages[ret]);
4821 written = -EFAULT;
4822 goto out;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004823 }
4824
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004825 for (i = 0; i < nr_pages; i++)
4826 map_page[i] = kmap_atomic(pages[i]);
Steven Rostedtd696b582011-09-22 11:50:27 -04004827
4828 local_save_flags(irq_flags);
4829 size = sizeof(*entry) + cnt + 2; /* possible \n added */
Alexander Z Lam2d716192013-07-01 15:31:24 -07004830 buffer = tr->trace_buffer.buffer;
Steven Rostedtd696b582011-09-22 11:50:27 -04004831 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
4832 irq_flags, preempt_count());
4833 if (!event) {
4834 /* Ring buffer disabled, return as if not open for write */
4835 written = -EBADF;
4836 goto out_unlock;
4837 }
4838
4839 entry = ring_buffer_event_data(event);
4840 entry->ip = _THIS_IP_;
4841
4842 if (nr_pages == 2) {
4843 len = PAGE_SIZE - offset;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004844 memcpy(&entry->buf, map_page[0] + offset, len);
4845 memcpy(&entry->buf[len], map_page[1], cnt - len);
Steven Rostedtd696b582011-09-22 11:50:27 -04004846 } else
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004847 memcpy(&entry->buf, map_page[0] + offset, cnt);
Steven Rostedtd696b582011-09-22 11:50:27 -04004848
4849 if (entry->buf[cnt - 1] != '\n') {
4850 entry->buf[cnt] = '\n';
4851 entry->buf[cnt + 1] = '\0';
4852 } else
4853 entry->buf[cnt] = '\0';
4854
Steven Rostedt7ffbd482012-10-11 12:14:25 -04004855 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04004856
4857 written = cnt;
4858
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02004859 *fpos += written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004860
Steven Rostedtd696b582011-09-22 11:50:27 -04004861 out_unlock:
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004862 for (i = 0; i < nr_pages; i++){
4863 kunmap_atomic(map_page[i]);
4864 put_page(pages[i]);
4865 }
Steven Rostedtd696b582011-09-22 11:50:27 -04004866 out:
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02004867 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004868}
4869
Li Zefan13f16d22009-12-08 11:16:11 +08004870static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08004871{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004872 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08004873 int i;
4874
4875 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08004876 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08004877 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004878 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
4879 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08004880 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08004881
Li Zefan13f16d22009-12-08 11:16:11 +08004882 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08004883}
4884
Steven Rostedte1e232c2014-02-10 23:38:46 -05004885static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
Zhaolei5079f322009-08-25 16:12:56 +08004886{
Zhaolei5079f322009-08-25 16:12:56 +08004887 int i;
4888
Zhaolei5079f322009-08-25 16:12:56 +08004889 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
4890 if (strcmp(trace_clocks[i].name, clockstr) == 0)
4891 break;
4892 }
4893 if (i == ARRAY_SIZE(trace_clocks))
4894 return -EINVAL;
4895
Zhaolei5079f322009-08-25 16:12:56 +08004896 mutex_lock(&trace_types_lock);
4897
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004898 tr->clock_id = i;
4899
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004900 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08004901
David Sharp60303ed2012-10-11 16:27:52 -07004902 /*
4903 * New clock may not be consistent with the previous clock.
4904 * Reset the buffer so that it doesn't have incomparable timestamps.
4905 */
Alexander Z Lam94571582013-08-02 18:36:16 -07004906 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004907
4908#ifdef CONFIG_TRACER_MAX_TRACE
4909 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
4910 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
Alexander Z Lam94571582013-08-02 18:36:16 -07004911 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004912#endif
David Sharp60303ed2012-10-11 16:27:52 -07004913
Zhaolei5079f322009-08-25 16:12:56 +08004914 mutex_unlock(&trace_types_lock);
4915
Steven Rostedte1e232c2014-02-10 23:38:46 -05004916 return 0;
4917}
4918
/*
 * Write handler for "trace_clock": accept a clock name from userspace
 * and switch to it via tracing_set_clock().
 */
static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *fpos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	const char *clockstr;
	int ret;

	/* Need room for the string plus a terminating NUL. */
	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* Strip surrounding whitespace (e.g. the newline "echo" appends). */
	clockstr = strstrip(buf);

	ret = tracing_set_clock(tr, clockstr);
	if (ret)
		return ret;

	*fpos += cnt;

	return cnt;
}
4946
Li Zefan13f16d22009-12-08 11:16:11 +08004947static int tracing_clock_open(struct inode *inode, struct file *file)
4948{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004949 struct trace_array *tr = inode->i_private;
4950 int ret;
4951
Li Zefan13f16d22009-12-08 11:16:11 +08004952 if (tracing_disabled)
4953 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004954
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004955 if (trace_array_get(tr))
4956 return -ENODEV;
4957
4958 ret = single_open(file, tracing_clock_show, inode->i_private);
4959 if (ret < 0)
4960 trace_array_put(tr);
4961
4962 return ret;
Li Zefan13f16d22009-12-08 11:16:11 +08004963}
4964
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05004965struct ftrace_buffer_info {
4966 struct trace_iterator iter;
4967 void *spare;
4968 unsigned int read;
4969};
4970
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004971#ifdef CONFIG_TRACER_SNAPSHOT
/*
 * Open handler for the "snapshot" file.
 *
 * Readers get a full trace iterator (__tracing_open() with snapshot set)
 * over the max buffer.  Writers only need a stub seq_file carrying a
 * minimal iterator that records which trace_array and cpu the write
 * should act on.
 */
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret = 0;

	/* Pin the trace_array for the lifetime of the open file. */
	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		ret = -ENOMEM;
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
			goto out;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
			goto out;
		}
		ret = 0;

		iter->tr = tr;
		/* Writes act on the snapshot (max) buffer. */
		iter->trace_buffer = &tr->max_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		m->private = iter;
		file->private_data = m;
	}
out:
	/* Any failure above must drop the reference taken at entry. */
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
5011
/*
 * Write handler for the "snapshot" file.  The value written selects an
 * action:
 *   0 - free the snapshot buffer (all-cpu files only)
 *   1 - allocate the snapshot buffer if needed, then swap it with the
 *       live buffer (per-cpu swap only if the ring buffer supports it)
 *   other - clear the snapshot buffer's contents
 */
static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	/* Make sure the main ring buffer has been expanded/allocated. */
	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	/* A max-latency tracer owns the snapshot buffer; hands off. */
	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	switch (val) {
	case 0:
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (!tr->allocated_snapshot) {
			ret = alloc_snapshot(tr);
			if (ret < 0)
				break;
		}
		/* IRQs off so this cpu can't trace into the middle of the swap. */
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id());
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005085
5086static int tracing_snapshot_release(struct inode *inode, struct file *file)
5087{
5088 struct seq_file *m = file->private_data;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005089 int ret;
5090
5091 ret = tracing_release(inode, file);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005092
5093 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005094 return ret;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005095
5096 /* If write only, the seq_file is just a stub */
5097 if (m)
5098 kfree(m->private);
5099 kfree(m);
5100
5101 return 0;
5102}
5103
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005104static int tracing_buffers_open(struct inode *inode, struct file *filp);
5105static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5106 size_t count, loff_t *ppos);
5107static int tracing_buffers_release(struct inode *inode, struct file *file);
5108static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5109 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5110
/*
 * Open handler for the snapshot raw file: set up via the normal raw
 * buffer open path, then redirect reads at the snapshot (max) buffer.
 */
static int snapshot_raw_open(struct inode *inode, struct file *filp)
{
	struct ftrace_buffer_info *info;
	int ret;

	ret = tracing_buffers_open(inode, filp);
	if (ret < 0)
		return ret;

	info = filp->private_data;

	/* Tracers that use the max buffer themselves can't be snapshotted. */
	if (info->iter.trace->use_max_tr) {
		tracing_buffers_release(inode, filp);
		return -EBUSY;
	}

	info->iter.snapshot = true;
	info->iter.trace_buffer = &info->iter.tr->max_buffer;

	return ret;
}
5132
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005133#endif /* CONFIG_TRACER_SNAPSHOT */
5134
5135
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005136static const struct file_operations tracing_max_lat_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005137 .open = tracing_open_generic,
5138 .read = tracing_max_lat_read,
5139 .write = tracing_max_lat_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005140 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005141};
5142
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005143static const struct file_operations set_tracer_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005144 .open = tracing_open_generic,
5145 .read = tracing_set_trace_read,
5146 .write = tracing_set_trace_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005147 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005148};
5149
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005150static const struct file_operations tracing_pipe_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005151 .open = tracing_open_pipe,
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005152 .poll = tracing_poll_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005153 .read = tracing_read_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005154 .splice_read = tracing_splice_read_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005155 .release = tracing_release_pipe,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005156 .llseek = no_llseek,
Steven Rostedtb3806b42008-05-12 21:20:46 +02005157};
5158
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005159static const struct file_operations tracing_entries_fops = {
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005160 .open = tracing_open_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02005161 .read = tracing_entries_read,
5162 .write = tracing_entries_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005163 .llseek = generic_file_llseek,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005164 .release = tracing_release_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02005165};
5166
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005167static const struct file_operations tracing_total_entries_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005168 .open = tracing_open_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005169 .read = tracing_total_entries_read,
5170 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005171 .release = tracing_release_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005172};
5173
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005174static const struct file_operations tracing_free_buffer_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005175 .open = tracing_open_generic_tr,
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005176 .write = tracing_free_buffer_write,
5177 .release = tracing_free_buffer_release,
5178};
5179
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005180static const struct file_operations tracing_mark_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005181 .open = tracing_open_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005182 .write = tracing_mark_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005183 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005184 .release = tracing_release_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005185};
5186
Zhaolei5079f322009-08-25 16:12:56 +08005187static const struct file_operations trace_clock_fops = {
Li Zefan13f16d22009-12-08 11:16:11 +08005188 .open = tracing_clock_open,
5189 .read = seq_read,
5190 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005191 .release = tracing_single_release_tr,
Zhaolei5079f322009-08-25 16:12:56 +08005192 .write = tracing_clock_write,
5193};
5194
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005195#ifdef CONFIG_TRACER_SNAPSHOT
/* "snapshot": read the snapshot buffer; write 0/1/other to free/swap/clear. */
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005203
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005204static const struct file_operations snapshot_raw_fops = {
5205 .open = snapshot_raw_open,
5206 .read = tracing_buffers_read,
5207 .release = tracing_buffers_release,
5208 .splice_read = tracing_buffers_splice_read,
5209 .llseek = no_llseek,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005210};
5211
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005212#endif /* CONFIG_TRACER_SNAPSHOT */
5213
Steven Rostedt2cadf912008-12-01 22:20:19 -05005214static int tracing_buffers_open(struct inode *inode, struct file *filp)
5215{
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005216 struct trace_array *tr = inode->i_private;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005217 struct ftrace_buffer_info *info;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005218 int ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005219
5220 if (tracing_disabled)
5221 return -ENODEV;
5222
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005223 if (trace_array_get(tr) < 0)
5224 return -ENODEV;
5225
Steven Rostedt2cadf912008-12-01 22:20:19 -05005226 info = kzalloc(sizeof(*info), GFP_KERNEL);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005227 if (!info) {
5228 trace_array_put(tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005229 return -ENOMEM;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005230 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05005231
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005232 mutex_lock(&trace_types_lock);
5233
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005234 info->iter.tr = tr;
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005235 info->iter.cpu_file = tracing_get_cpu(inode);
Steven Rostedtb6273442013-02-28 13:44:11 -05005236 info->iter.trace = tr->current_trace;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005237 info->iter.trace_buffer = &tr->trace_buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005238 info->spare = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005239 /* Force reading ring buffer for first read */
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005240 info->read = (unsigned int)-1;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005241
5242 filp->private_data = info;
5243
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005244 mutex_unlock(&trace_types_lock);
5245
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005246 ret = nonseekable_open(inode, filp);
5247 if (ret < 0)
5248 trace_array_put(tr);
5249
5250 return ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005251}
5252
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005253static unsigned int
5254tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5255{
5256 struct ftrace_buffer_info *info = filp->private_data;
5257 struct trace_iterator *iter = &info->iter;
5258
5259 return trace_poll(iter, filp, poll_table);
5260}
5261
Steven Rostedt2cadf912008-12-01 22:20:19 -05005262static ssize_t
5263tracing_buffers_read(struct file *filp, char __user *ubuf,
5264 size_t count, loff_t *ppos)
5265{
5266 struct ftrace_buffer_info *info = filp->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005267 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005268 ssize_t ret;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005269 ssize_t size;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005270
Steven Rostedt2dc5d122009-03-04 19:10:05 -05005271 if (!count)
5272 return 0;
5273
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005274 mutex_lock(&trace_types_lock);
5275
5276#ifdef CONFIG_TRACER_MAX_TRACE
5277 if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5278 size = -EBUSY;
5279 goto out_unlock;
5280 }
5281#endif
5282
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005283 if (!info->spare)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005284 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5285 iter->cpu_file);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005286 size = -ENOMEM;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005287 if (!info->spare)
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005288 goto out_unlock;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005289
Steven Rostedt2cadf912008-12-01 22:20:19 -05005290 /* Do we have previous read data to read? */
5291 if (info->read < PAGE_SIZE)
5292 goto read;
5293
Steven Rostedtb6273442013-02-28 13:44:11 -05005294 again:
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005295 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005296 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005297 &info->spare,
5298 count,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005299 iter->cpu_file, 0);
5300 trace_access_unlock(iter->cpu_file);
Steven Rostedtb6273442013-02-28 13:44:11 -05005301
5302 if (ret < 0) {
5303 if (trace_empty(iter)) {
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005304 if ((filp->f_flags & O_NONBLOCK)) {
5305 size = -EAGAIN;
5306 goto out_unlock;
5307 }
5308 mutex_unlock(&trace_types_lock);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005309 ret = wait_on_pipe(iter);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005310 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005311 if (ret) {
5312 size = ret;
5313 goto out_unlock;
5314 }
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005315 if (signal_pending(current)) {
5316 size = -EINTR;
5317 goto out_unlock;
5318 }
Steven Rostedtb6273442013-02-28 13:44:11 -05005319 goto again;
5320 }
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005321 size = 0;
5322 goto out_unlock;
Steven Rostedtb6273442013-02-28 13:44:11 -05005323 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05005324
Steven Rostedt436fc282011-10-14 10:44:25 -04005325 info->read = 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05005326 read:
Steven Rostedt2cadf912008-12-01 22:20:19 -05005327 size = PAGE_SIZE - info->read;
5328 if (size > count)
5329 size = count;
5330
5331 ret = copy_to_user(ubuf, info->spare + info->read, size);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005332 if (ret == size) {
5333 size = -EFAULT;
5334 goto out_unlock;
5335 }
Steven Rostedt2dc5d122009-03-04 19:10:05 -05005336 size -= ret;
5337
Steven Rostedt2cadf912008-12-01 22:20:19 -05005338 *ppos += size;
5339 info->read += size;
5340
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005341 out_unlock:
5342 mutex_unlock(&trace_types_lock);
5343
Steven Rostedt2cadf912008-12-01 22:20:19 -05005344 return size;
5345}
5346
/*
 * Release handler for the raw buffer files: return the spare page to
 * the ring buffer, free the per-file state and drop the trace_array
 * reference taken at open.
 */
static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	__trace_array_put(iter->tr);

	if (info->spare)
		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
	kfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}
5364
/*
 * Reference-counted wrapper around a ring buffer page handed to a pipe
 * via splice.  The page goes back to the ring buffer when the last
 * reference is dropped.
 */
struct buffer_ref {
	struct ring_buffer	*buffer;	/* ring buffer the page came from */
	void			*page;		/* the read page itself */
	int			ref;		/* reference count */
};
5370
5371static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5372 struct pipe_buffer *buf)
5373{
5374 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5375
5376 if (--ref->ref)
5377 return;
5378
5379 ring_buffer_free_read_page(ref->buffer, ref->page);
5380 kfree(ref);
5381 buf->private = 0;
5382}
5383
Steven Rostedt2cadf912008-12-01 22:20:19 -05005384static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5385 struct pipe_buffer *buf)
5386{
5387 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5388
5389 ref->ref++;
5390}
5391
5392/* Pipe buffer operations for a buffer. */
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08005393static const struct pipe_buf_operations buffer_pipe_buf_ops = {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005394 .can_merge = 0,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005395 .confirm = generic_pipe_buf_confirm,
5396 .release = buffer_pipe_buf_release,
Masami Hiramatsud55cb6c2012-08-09 21:31:10 +09005397 .steal = generic_pipe_buf_steal,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005398 .get = buffer_pipe_buf_get,
5399};
5400
5401/*
5402 * Callback from splice_to_pipe(), if we need to release some pages
5403 * at the end of the spd in case we error'ed out in filling the pipe.
5404 */
5405static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5406{
5407 struct buffer_ref *ref =
5408 (struct buffer_ref *)spd->partial[i].private;
5409
5410 if (--ref->ref)
5411 return;
5412
5413 ring_buffer_free_read_page(ref->buffer, ref->page);
5414 kfree(ref);
5415 spd->partial[i].private = 0;
5416}
5417
/*
 * Splice ring buffer pages for one CPU (iter->cpu_file) straight into a
 * pipe without copying. Each spliced page carries a buffer_ref whose
 * lifetime is managed by buffer_pipe_buf_ops / buffer_spd_release.
 * Requires page-aligned *ppos and len; blocks (unless O_NONBLOCK or
 * SPLICE_F_NONBLOCK) until at least one page of data is available.
 */
static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, size, i;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	/* The snapshot buffer may be swapped out while max_tr is in use */
	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}
#endif

	if (splice_grow_spd(pipe, &spd)) {
		ret = -ENOMEM;
		goto out;
	}

	/* Reads must be whole-page aligned */
	if (*ppos & (PAGE_SIZE - 1)) {
		ret = -EINVAL;
		goto out;
	}

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE) {
			ret = -EINVAL;
			goto out;
		}
		/* Round a larger, unaligned length down to whole pages */
		len &= PAGE_MASK;
	}

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	/* Grab one ring buffer page per pipe slot while data remains */
	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref)
			break;

		ref->ref = 1;
		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (!ref->page) {
			kfree(ref);
			break;
		}

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->page);
			kfree(ref);
			break;
		}

		/*
		 * zero out any left over data, this is going to
		 * user land.
		 */
		size = ring_buffer_page_len(ref->page);
		if (size < PAGE_SIZE)
			memset(ref->page + size, 0, PAGE_SIZE - size);

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
			ret = -EAGAIN;
			goto out;
		}
		/* Drop trace_types_lock while sleeping for data */
		mutex_unlock(&trace_types_lock);
		ret = wait_on_pipe(iter);
		mutex_lock(&trace_types_lock);
		if (ret)
			goto out;
		if (signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
	splice_shrink_spd(&spd);
out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
5542
/* File operations for the per-cpu trace_pipe_raw debugfs file */
static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};
5551
/*
 * Read handler for the per-cpu "stats" file: format the ring buffer
 * counters (entries, overruns, bytes, timestamps, dropped/read events)
 * for one CPU into a trace_seq and copy them out to user space.
 */
static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	/* trace_seq is too big for the stack; allocate it */
	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
								t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				ring_buffer_time_stamp(trace_buf->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);

	kfree(s);

	return count;
}
5614
/* File operations for the per-cpu "stats" debugfs file */
static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};
5621
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005622#ifdef CONFIG_DYNAMIC_FTRACE
5623
/*
 * Weak default: an architecture may override this to append arch
 * specific text to the dyn_ftrace_total_info output. The default
 * appends nothing.
 */
int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}
5628
/*
 * Read handler for dyn_ftrace_total_info: print the counter that
 * filp->private_data points at, followed by any arch specific info.
 * A static buffer is used; dyn_info_mutex serializes readers.
 */
static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
	int r;

	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	/* Leave room for the trailing newline added below */
	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
	buf[r++] = '\n';

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);

	return r;
}
5652
/* File operations for dyn_ftrace_total_info */
static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005658#endif /* CONFIG_DYNAMIC_FTRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005659
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005660#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
/* ftrace probe callback: take a snapshot every time the traced function hits */
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	tracing_snapshot();
}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005666
/*
 * ftrace probe callback with a countdown: take a snapshot until the
 * count (set via "func:snapshot:N" in set_ftrace_filter) reaches zero.
 * A count of -1 means unlimited.
 */
static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	/*
	 * The probe's data pointer slot itself holds the counter (see
	 * ftrace_trace_snapshot_callback()); it is not a pointer to one.
	 * Cast to the type we actually read/write, not (long *).
	 */
	unsigned long *count = (unsigned long *)data;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_snapshot();
}
5680
/*
 * ->print callback for the snapshot probes: show one line of the form
 * "func:snapshot[:unlimited|:count=N]" in the set_ftrace_filter listing.
 * Constant strings use seq_puts(); seq_printf() is only for formatting.
 */
static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	/* The count was stored directly in the data pointer */
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	seq_puts(m, "snapshot");

	if (count == -1)
		seq_puts(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}
5698
/* Probe ops for "func:snapshot" (no count) */
static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,
};
5703
/* Probe ops for "func:snapshot:N" (counted) */
static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,
};
5708
/*
 * Parser for the "snapshot" command written to set_ftrace_filter:
 *   func:snapshot       - snapshot on every hit
 *   func:snapshot:N     - snapshot on the next N hits
 *   !func:snapshot      - remove the probe
 * On successful registration the snapshot buffer is allocated so the
 * probe can fire from any context.
 */
static int
ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	/* A leading '!' means unregister */
	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	if (ret >= 0)
		alloc_snapshot(&global_trace);

	/* register returns the number of functions matched */
	return ret < 0 ? ret : 0;
}
5753
/* The "snapshot" command available in set_ftrace_filter */
static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name			= "snapshot",
	.func			= ftrace_trace_snapshot_callback,
};
5758
/* Register the "snapshot" ftrace command (boot-time only) */
static __init int register_snapshot_cmd(void)
{
	return register_ftrace_command(&ftrace_snapshot_cmd);
}
#else
/* No snapshot + dynamic ftrace support: nothing to register */
static inline __init int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005766
/*
 * Return (creating on first use) the debugfs directory for a trace
 * array. Only the global array creates the top level "tracing" dir;
 * instance directories are set up elsewhere and found via tr->dir.
 */
struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
{
	if (tr->dir)
		return tr->dir;

	if (!debugfs_initialized())
		return NULL;

	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		tr->dir = debugfs_create_dir("tracing", NULL);

	if (!tr->dir)
		pr_warn_once("Could not create debugfs directory 'tracing'\n");

	return tr->dir;
}
5783
/* Convenience wrapper: debugfs directory of the global trace array */
struct dentry *tracing_init_dentry(void)
{
	return tracing_init_dentry_tr(&global_trace);
}
5788
/*
 * Return (creating on first use) the "per_cpu" debugfs directory of a
 * trace array; cpu is only used for the warning message.
 */
static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
	struct dentry *d_tracer;

	if (tr->percpu_dir)
		return tr->percpu_dir;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);

	WARN_ONCE(!tr->percpu_dir,
		  "Could not create debugfs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;
}
5807
/*
 * Like trace_create_file() but also stashes (cpu + 1) in the inode's
 * i_cdev field so tracing_get_cpu() can recover which CPU the file
 * belongs to (+1 keeps cpu 0 distinguishable from "no cpu").
 */
static struct dentry *
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
		      void *data, long cpu, const struct file_operations *fops)
{
	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);

	if (ret) /* See tracing_get_cpu() */
		ret->d_inode->i_cdev = (void *)(cpu + 1);
	return ret;
}
5818
/*
 * Populate per_cpu/cpuN/ for one CPU of a trace array with the per-cpu
 * variants of the tracing control files. Failures only warn; the rest
 * of the tracing directory remains usable.
 */
static void
tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
				tr, cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_cpu_file("trace", 0644, d_cpu,
				tr, cpu, &tracing_fops);

	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
				tr, cpu, &tracing_buffers_fops);

	trace_create_cpu_file("stats", 0444, d_cpu,
				tr, cpu, &tracing_stats_fops);

	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
				tr, cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_cpu_file("snapshot", 0644, d_cpu,
				tr, cpu, &snapshot_fops);

	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
				tr, cpu, &snapshot_raw_fops);
#endif
}
5861
Steven Rostedt60a11772008-05-12 21:20:44 +02005862#ifdef CONFIG_FTRACE_SELFTEST
5863/* Let selftest have access to static functions in this file */
5864#include "trace_selftest.c"
5865#endif
5866
/* Ties one tracer option's debugfs file to its flag bit */
struct trace_option_dentry {
	struct tracer_opt		*opt;	/* the option this file toggles */
	struct tracer_flags		*flags;	/* flag word the option lives in */
	struct trace_array		*tr;	/* owning trace array */
	struct dentry			*entry;	/* the debugfs file itself */
};
5873
5874static ssize_t
5875trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
5876 loff_t *ppos)
5877{
5878 struct trace_option_dentry *topt = filp->private_data;
5879 char *buf;
5880
5881 if (topt->flags->val & topt->opt->bit)
5882 buf = "1\n";
5883 else
5884 buf = "0\n";
5885
5886 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5887}
5888
/*
 * Write handler for a tracer-specific option file. Accepts "0" or "1";
 * flips the option via __set_tracer_option() only when the value
 * actually changes, under trace_types_lock.
 */
static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	/* Only call into the tracer when the bit actually changes */
	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}
5917
5918
/* File operations for tracer-specific option files (options/<opt>) */
static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
	.llseek	= generic_file_llseek,
};
5925
Steven Rostedta8259072009-02-26 22:19:12 -05005926static ssize_t
5927trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
5928 loff_t *ppos)
5929{
5930 long index = (long)filp->private_data;
5931 char *buf;
5932
5933 if (trace_flags & (1 << index))
5934 buf = "1\n";
5935 else
5936 buf = "0\n";
5937
5938 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5939}
5940
/*
 * Write handler for a core trace option file. Accepts "0" or "1" and
 * updates the global trace_flags bit via set_tracer_flag() under
 * trace_types_lock.
 */
static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	struct trace_array *tr = &global_trace;
	long index = (long)filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}
5968
/* File operations for core trace option files (options/<flag>) */
static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek = generic_file_llseek,
};
5975
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005976struct dentry *trace_create_file(const char *name,
Al Virof4ae40a2011-07-24 04:33:43 -04005977 umode_t mode,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005978 struct dentry *parent,
5979 void *data,
5980 const struct file_operations *fops)
5981{
5982 struct dentry *ret;
5983
5984 ret = debugfs_create_file(name, mode, parent, data, fops);
5985 if (!ret)
5986 pr_warning("Could not create debugfs '%s' entry\n", name);
5987
5988 return ret;
5989}
5990
5991
/*
 * Return (creating on first use) the "options" debugfs directory of a
 * trace array.
 */
static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->options = debugfs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warning("Could not create debugfs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}
6011
/*
 * Create one options/<opt-name> file for a tracer option, wiring the
 * trace_option_dentry up so the read/write handlers can find the flag.
 * Failure to create the directory or file is silently tolerated.
 */
static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
				    &trace_options_fops);

}
6032
6033static struct trace_option_dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006034create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
Steven Rostedt577b7852009-02-26 23:43:05 -05006035{
6036 struct trace_option_dentry *topts;
6037 struct tracer_flags *flags;
6038 struct tracer_opt *opts;
6039 int cnt;
6040
6041 if (!tracer)
6042 return NULL;
6043
6044 flags = tracer->flags;
6045
6046 if (!flags || !flags->opts)
6047 return NULL;
6048
6049 opts = flags->opts;
6050
6051 for (cnt = 0; opts[cnt].name; cnt++)
6052 ;
6053
Steven Rostedt0cfe8242009-02-27 10:51:10 -05006054 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
Steven Rostedt577b7852009-02-26 23:43:05 -05006055 if (!topts)
6056 return NULL;
6057
6058 for (cnt = 0; opts[cnt].name; cnt++)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006059 create_trace_option_file(tr, &topts[cnt], flags,
Steven Rostedt577b7852009-02-26 23:43:05 -05006060 &opts[cnt]);
6061
6062 return topts;
6063}
6064
6065static void
6066destroy_trace_option_files(struct trace_option_dentry *topts)
6067{
6068 int cnt;
6069
6070 if (!topts)
6071 return;
6072
Fabian Frederick3f4d8f72014-06-26 19:14:31 +02006073 for (cnt = 0; topts[cnt].opt; cnt++)
6074 debugfs_remove(topts[cnt].entry);
Steven Rostedt577b7852009-02-26 23:43:05 -05006075
6076 kfree(topts);
6077}
6078
Steven Rostedta8259072009-02-26 22:19:12 -05006079static struct dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006080create_trace_option_core_file(struct trace_array *tr,
6081 const char *option, long index)
Steven Rostedta8259072009-02-26 22:19:12 -05006082{
6083 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05006084
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006085 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05006086 if (!t_options)
6087 return NULL;
6088
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006089 return trace_create_file(option, 0644, t_options, (void *)index,
Steven Rostedta8259072009-02-26 22:19:12 -05006090 &trace_options_core_fops);
Steven Rostedta8259072009-02-26 22:19:12 -05006091}
6092
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006093static __init void create_trace_options_dir(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05006094{
6095 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05006096 int i;
6097
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006098 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05006099 if (!t_options)
6100 return;
6101
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006102 for (i = 0; trace_options[i]; i++)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006103 create_trace_option_core_file(tr, trace_options[i], i);
Steven Rostedta8259072009-02-26 22:19:12 -05006104}
6105
Steven Rostedt499e5472012-02-22 15:50:28 -05006106static ssize_t
6107rb_simple_read(struct file *filp, char __user *ubuf,
6108 size_t cnt, loff_t *ppos)
6109{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04006110 struct trace_array *tr = filp->private_data;
Steven Rostedt499e5472012-02-22 15:50:28 -05006111 char buf[64];
6112 int r;
6113
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04006114 r = tracer_tracing_is_on(tr);
Steven Rostedt499e5472012-02-22 15:50:28 -05006115 r = sprintf(buf, "%d\n", r);
6116
6117 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6118}
6119
/*
 * Write handler for tracing_on: any nonzero value turns recording on
 * and calls the tracer's ->start() hook; zero turns it off and calls
 * ->stop(). Runs under trace_types_lock so the current tracer cannot
 * change mid-update.
 */
static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}
6151
/* File operations for the tracing_on control file */
static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic_tr,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};
6159
/* debugfs "instances" directory root, created during tracer init */
struct dentry *trace_instance_dir;

/* Forward declaration: defined later in this file */
static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
6164
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006165static int
6166allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
Steven Rostedt277ba042012-08-03 16:10:49 -04006167{
6168 enum ring_buffer_flags rb_flags;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006169
6170 rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
6171
Steven Rostedt (Red Hat)dced3412014-01-14 10:19:46 -05006172 buf->tr = tr;
6173
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006174 buf->buffer = ring_buffer_alloc(size, rb_flags);
6175 if (!buf->buffer)
6176 return -ENOMEM;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006177
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006178 buf->data = alloc_percpu(struct trace_array_cpu);
6179 if (!buf->data) {
6180 ring_buffer_free(buf->buffer);
6181 return -ENOMEM;
6182 }
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006183
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006184 /* Allocate the first page for all buffers */
6185 set_buffer_entries(&tr->trace_buffer,
6186 ring_buffer_size(tr->trace_buffer.buffer, 0));
6187
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006188 return 0;
6189}
6190
6191static int allocate_trace_buffers(struct trace_array *tr, int size)
6192{
6193 int ret;
6194
6195 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
6196 if (ret)
6197 return ret;
6198
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006199#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006200 ret = allocate_trace_buffer(tr, &tr->max_buffer,
6201 allocate_snapshot ? size : 1);
6202 if (WARN_ON(ret)) {
6203 ring_buffer_free(tr->trace_buffer.buffer);
6204 free_percpu(tr->trace_buffer.data);
6205 return -ENOMEM;
6206 }
6207 tr->allocated_snapshot = allocate_snapshot;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006208
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006209 /*
6210 * Only the top level trace array gets its snapshot allocated
6211 * from the kernel command line.
6212 */
6213 allocate_snapshot = false;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006214#endif
6215 return 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006216}
6217
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04006218static void free_trace_buffer(struct trace_buffer *buf)
6219{
6220 if (buf->buffer) {
6221 ring_buffer_free(buf->buffer);
6222 buf->buffer = NULL;
6223 free_percpu(buf->data);
6224 buf->data = NULL;
6225 }
6226}
6227
/*
 * Free both the main trace buffer and, when configured, the snapshot
 * (max) buffer of a trace array.  A NULL @tr is tolerated.
 */
static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}
6239
/*
 * Create a new trace_array instance named @name under the debugfs
 * "instances" directory.
 *
 * Returns 0 on success, -EEXIST if an instance of that name already
 * exists, or -ENOMEM on allocation/debugfs failure.  Called from
 * instance_mkdir() with the instances directory i_mutex dropped.
 */
static int new_instance_create(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	/* Duplicate names are rejected; also resolves racing mkdirs. */
	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	/* New instances trace all CPUs by default. */
	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	/* Instances start with no tracer selected. */
	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = debugfs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		debugfs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	init_tracer_debugfs(tr, tr->dir);

	/* Publish the instance only once it is fully set up. */
	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	/* Safe on partially-constructed tr: all helpers tolerate NULLs. */
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;

}
6309
/*
 * Destroy the trace_array instance named @name.
 *
 * Returns 0 on success, -ENODEV if no such instance exists, or -EBUSY
 * if the instance still has references (e.g. open files).  Called from
 * instance_rmdir() with the relevant i_mutexes dropped.
 */
static int instance_delete(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	/* Refuse to delete while anything still holds a reference. */
	ret = -EBUSY;
	if (tr->ref)
		goto out_unlock;

	/* Unpublish first so no new users can find the instance. */
	list_del(&tr->list);

	tracing_set_nop(tr);
	event_trace_del_tracer(tr);
	ftrace_destroy_function_files(tr);
	debugfs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}
6350
/*
 * mkdir handler for the debugfs "instances" directory: creating a
 * subdirectory creates a new trace_array instance of that name.
 */
static int instance_mkdir (struct inode *inode, struct dentry *dentry, umode_t mode)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/*
	 * The inode mutex is locked, but debugfs_create_dir() will also
	 * take the mutex. As the instances directory can not be destroyed
	 * or changed in any other way, it is safe to unlock it, and
	 * let the dentry try. If two users try to make the same dir at
	 * the same time, then the new_instance_create() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = new_instance_create(dentry->d_iname);

	mutex_lock(&inode->i_mutex);

	return ret;
}
6377
/*
 * rmdir handler for the debugfs "instances" directory: removing a
 * subdirectory destroys the corresponding trace_array instance.
 */
static int instance_rmdir(struct inode *inode, struct dentry *dentry)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/* The caller did a dget() on dentry */
	mutex_unlock(&dentry->d_inode->i_mutex);

	/*
	 * Both i_mutexes are held on entry, but instance_delete() ends up
	 * in debugfs_remove_recursive(), which needs to take them itself.
	 * The instances directory can not be removed or renamed, so it is
	 * safe to drop both locks here and reacquire them afterwards.
	 * If two users race to remove the same instance, instance_delete()
	 * resolves the winner under trace_types_lock.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = instance_delete(dentry->d_iname);

	/* Retake parent before child; _nested annotation is for lockdep. */
	mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
	mutex_lock(&dentry->d_inode->i_mutex);

	return ret;
}
6408
/*
 * Inode operations installed on the "instances" directory so that
 * mkdir/rmdir there create/destroy trace_array instances.
 */
static const struct inode_operations instance_dir_inode_operations = {
	.lookup = simple_lookup,
	.mkdir = instance_mkdir,
	.rmdir = instance_rmdir,
};
6414
/* Create the debugfs "instances" directory and enable mkdir/rmdir on it. */
static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = debugfs_create_dir("instances", d_tracer);
	if (WARN_ON(!trace_instance_dir))
		return;

	/* Hijack the dir inode operations, to allow mkdir */
	trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
}
6424
/*
 * Populate a trace_array's debugfs directory with its control and
 * output files.  Used for both the top-level tracing directory
 * (global_trace) and each instance directory.
 */
static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	/* write-only: writing truncates the ring buffer */
	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	/* write-only: user-injected markers into the trace */
	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			&tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	/* per_cpu/cpuN/ directories with per-cpu trace files */
	for_each_tracing_cpu(cpu)
		tracing_init_debugfs_percpu(tr, cpu);

}
6483
/*
 * fs_initcall: create the top-level tracing debugfs hierarchy, including
 * the global_trace files, global-only files, the options directory, and
 * the "instances" directory.
 */
static __init int tracer_init_debugfs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	init_tracer_debugfs(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			&tracing_thresh, &tracing_max_lat_fops);

	trace_create_file("README", 0444, d_tracer,
			NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	create_trace_options_dir(&global_trace);

	return 0;
}
6519
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006520static int trace_panic_handler(struct notifier_block *this,
6521 unsigned long event, void *unused)
6522{
Steven Rostedt944ac422008-10-23 19:26:08 -04006523 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02006524 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006525 return NOTIFY_OK;
6526}
6527
/* Registered on the panic notifier chain by tracer_alloc_buffers(). */
static struct notifier_block trace_panic_notifier = {
	.notifier_call = trace_panic_handler,
	.next = NULL,
	.priority = 150 /* priority: INT_MAX >= x >= 0 */
};
6533
6534static int trace_die_handler(struct notifier_block *self,
6535 unsigned long val,
6536 void *data)
6537{
6538 switch (val) {
6539 case DIE_OOPS:
Steven Rostedt944ac422008-10-23 19:26:08 -04006540 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02006541 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006542 break;
6543 default:
6544 break;
6545 }
6546 return NOTIFY_OK;
6547}
6548
/* Registered on the die notifier chain by tracer_alloc_buffers(). */
static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};
6553
/*
 * printk is set to max of 1024, we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT 1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE KERN_EMERG
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006566
/*
 * Emit the contents of a trace_seq to the console (used by ftrace_dump)
 * and reset the seq for reuse.  Output is clamped to TRACE_MAX_PRINT.
 */
void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->len >= TRACE_MAX_PRINT)
		s->len = TRACE_MAX_PRINT;

	/* should be zero ended, but we are paranoid. */
	s->buffer[s->len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}
6581
/*
 * Initialize a trace_iterator over global_trace's buffer across all
 * CPUs.  Used by ftrace_dump() (and kdb) to walk the trace without
 * going through the file-based open path.
 */
void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	/* Let the current tracer set up any per-iterator state. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}
6600
/*
 * ftrace_dump - dump the ftrace ring buffers to the console
 * @oops_dump_mode: DUMP_ALL (every cpu), DUMP_ORIG (only the current
 *                  cpu), or DUMP_NONE
 *
 * Called from the panic/die notifiers above and from sysrq-z.  Runs
 * with interrupts disabled and per-cpu recording disabled for the
 * duration of the dump; only one dumper may run at a time.
 */
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	/* Keep tracing callbacks out of the buffers while we read them. */
	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
	}

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("# MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all what we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		/* Dumping can take a while; keep the NMI watchdog quiet. */
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE " (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	/* Restore user-symbol resolution and per-cpu recording. */
	trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01006710
/*
 * early_initcall: allocate the global trace buffers and bootstrap the
 * tracing core (cpumasks, saved-cmdlines, nop tracer, notifier chains,
 * boot-time options).  Must run before tracer_init_debugfs() exposes
 * any of this through debugfs.
 *
 * Returns 0 on success or -ENOMEM; on failure everything allocated so
 * far is unwound via the goto chain at the bottom.
 */
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;


	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* Used for event triggers */
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_free_cpumask;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}

	/* Honor a "traceoff" set before the buffer existed. */
	if (global_trace.buffer_disabled)
		tracing_off();

	/* trace_clock= boot parameter: failure only warns, not fatal. */
	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warning("Trace clock %s not defined, going back to default\n",
				   trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	/* Apply comma-separated trace_options= from the command line. */
	while (trace_boot_options) {
		char *option;

		option = strsep(&trace_boot_options, ",");
		trace_set_options(&global_trace, option);
	}

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
Steven Rostedtb2821ae2009-02-02 21:38:32 -05006813
__init static int clear_boot_tracer(void)
{
	/*
	 * The default tracer at boot buffer is an init section.
	 * This function is called in lateinit. If we did not
	 * find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	/* Drop the init-section pointer before init memory is freed. */
	default_bootup_tracer = NULL;

	return 0;
}
6832
/* Buffers first, then the debugfs files that expose them, then cleanup. */
early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);