/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will lurk into the ring-buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring-buffer, such as trace_printk(), could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered the oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

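/*
 * An illustrative sketch (not part of the original file) of how this
 * knob is typically driven from user space; the paths are the standard
 * sysctl and kernel command line described above:
 *
 *	# dump the buffers of all CPUs on an oops
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 *
 *	# dump only the buffer of the CPU that triggered the oops
 *	echo 2 > /proc/sys/kernel/ftrace_dump_on_oops
 *
 * or boot with "ftrace_dump_on_oops" / "ftrace_dump_on_oops=orig_cpu".
 */
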
/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning=", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);


unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array global_trace;

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}

int filter_check_discard(struct ftrace_event_file *file, void *rec,
			 struct ring_buffer *buffer,
			 struct ring_buffer_event *event)
{
	if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(file->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(filter_check_discard);

int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(call_filter_check_discard);

static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * The ring buffer serializes readers, but that is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek() etc.)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes to
 * consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the event producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multi-process access to different cpu ring
 * buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
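
/*
 * A sketch (added for illustration, not in the original file) of the
 * intended reader-side pattern for the primitives above; "cpu" may be a
 * specific CPU id or RING_BUFFER_ALL_CPUS:
 *
 *	trace_access_lock(cpu);
 *	... peek at or consume events of that cpu buffer ...
 *	trace_access_unlock(cpu);
 *
 * A per-cpu reader only takes all_cpu_access_lock for read, so readers
 * of different cpu buffers can run concurrently, while an ALL_CPUS
 * reader excludes everyone via the write side.
 */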

/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;

static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, preempt_count());
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);
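
/*
 * Illustrative call (added here as an example, not in the original
 * file): recording the literal "hello" from kernel code,
 *
 *	__trace_puts(_THIS_IP_, "hello", 5);
 *
 * writes a TRACE_PRINT entry containing "hello\n". In practice the
 * trace_puts() wrapper is used, which supplies the caller's ip and the
 * string length automatically.
 */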

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, preempt_count());
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
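
/*
 * Illustrative usage from a debugging patch (an example added here,
 * not in the original file):
 *
 *	// once, from a context that may sleep:
 *	tracing_alloc_snapshot();
 *	...
 *	// later, even from atomic context (but not NMI):
 *	if (condition_of_interest)
 *		tracing_snapshot();
 *
 * The snapshot can then be read from the "snapshot" file in the
 * tracing debugfs directory.
 */
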
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);
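
/*
 * For example (illustrative, matching the parsing above): booting with
 *
 *	tracing_thresh=100
 *
 * sets the threshold to 100 microseconds; it is stored internally in
 * nanoseconds, hence the "* 1000" above. Likewise "trace_buf_size="
 * accepts memparse() suffixes such as 64K or 1M.
 */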

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	"record-cmd",
	"overwrite",
	"disable_on_free",
	"irq-info",
	"markers",
	"function-trace",
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;	/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,	"local",	1 },
	{ trace_clock_global,	"global",	1 },
	{ trace_clock_counter,	"counter",	0 },
	{ trace_clock_jiffies,	"uptime",	1 },
	{ trace_clock,		"perf",		1 },
	ARCH_TRACE_CLOCKS
};
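
/*
 * The active clock is chosen at run time through the trace_clock file;
 * an illustrative example (not part of this file):
 *
 *	echo global > /sys/kernel/debug/tracing/trace_clock
 *
 * "local" is fastest but per-cpu, "global" is ordered across CPUs, and
 * "counter" is a simple atomic count, useful for strict ordering.
 */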

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
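
/*
 * Sketch of the intended calling pattern from a debugfs ->write()
 * handler (an illustration added here; error handling elided):
 *
 *	struct trace_parser parser;
 *	ssize_t read;
 *
 *	trace_parser_get_init(&parser, PAGE_SIZE);
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser))
 *		// parser.buffer now holds one NUL-terminated token
 *		...
 *	trace_parser_put(&parser);
 */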

ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
	int len;
	int ret;

	if (!cnt)
		return 0;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
	if (ret == cnt)
		return -EFAULT;

	cnt -= ret;

	s->readpos += cnt;
	return cnt;
}

static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->readpos, cnt);

	s->readpos += cnt;
	return cnt;
}

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int wait_on_pipe(struct trace_iterator *iter)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}
1237
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001238void tracing_reset(struct trace_buffer *buf, int cpu)
Steven Rostedtf6339032009-09-04 12:35:16 -04001239{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001240 struct ring_buffer *buffer = buf->buffer;
Steven Rostedtf6339032009-09-04 12:35:16 -04001241
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001242 if (!buffer)
1243 return;
1244
Steven Rostedtf6339032009-09-04 12:35:16 -04001245 ring_buffer_record_disable(buffer);
1246
1247 /* Make sure all commits have finished */
1248 synchronize_sched();
Steven Rostedt68179682012-05-08 20:57:53 -04001249 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedtf6339032009-09-04 12:35:16 -04001250
1251 ring_buffer_record_enable(buffer);
1252}
1253
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001254void tracing_reset_online_cpus(struct trace_buffer *buf)
Pekka J Enberg213cc062008-12-19 12:08:39 +02001255{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001256 struct ring_buffer *buffer = buf->buffer;
Pekka J Enberg213cc062008-12-19 12:08:39 +02001257 int cpu;
1258
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001259 if (!buffer)
1260 return;
1261
Steven Rostedt621968c2009-09-04 12:02:35 -04001262 ring_buffer_record_disable(buffer);
1263
1264 /* Make sure all commits have finished */
1265 synchronize_sched();
1266
Alexander Z Lam94571582013-08-02 18:36:16 -07001267 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001268
1269 for_each_online_cpu(cpu)
Steven Rostedt68179682012-05-08 20:57:53 -04001270 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedt621968c2009-09-04 12:02:35 -04001271
1272 ring_buffer_record_enable(buffer);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001273}
1274
Steven Rostedt (Red Hat)09d80912013-07-23 22:21:59 -04001275/* Must have trace_types_lock held */
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001276void tracing_reset_all_online_cpus(void)
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001277{
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001278 struct trace_array *tr;
1279
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001280 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001281 tracing_reset_online_cpus(&tr->trace_buffer);
1282#ifdef CONFIG_TRACER_MAX_TRACE
1283 tracing_reset_online_cpus(&tr->max_buffer);
1284#endif
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001285 }
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001286}
1287
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001288#define SAVED_CMDLINES_DEFAULT 128
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001289#define NO_CMDLINE_MAP UINT_MAX
Thomas Gleixneredc35bd2009-12-03 12:38:57 +01001290static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001291struct saved_cmdlines_buffer {
1292 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1293 unsigned *map_cmdline_to_pid;
1294 unsigned cmdline_num;
1295 int cmdline_idx;
1296 char *saved_cmdlines;
1297};
1298static struct saved_cmdlines_buffer *savedcmd;
Steven Rostedt25b0b442008-05-12 21:21:00 +02001299
Steven Rostedt25b0b442008-05-12 21:21:00 +02001300/* temporary disable recording */
Hannes Eder4fd27352009-02-10 19:44:12 +01001301static atomic_t trace_record_cmdline_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001302
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001303static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001304{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001305 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1306}
1307
1308static inline void set_cmdline(int idx, const char *cmdline)
1309{
1310 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1311}
1312
1313static int allocate_cmdlines_buffer(unsigned int val,
1314 struct saved_cmdlines_buffer *s)
1315{
1316 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1317 GFP_KERNEL);
1318 if (!s->map_cmdline_to_pid)
1319 return -ENOMEM;
1320
1321 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1322 if (!s->saved_cmdlines) {
1323 kfree(s->map_cmdline_to_pid);
1324 return -ENOMEM;
1325 }
1326
1327 s->cmdline_idx = 0;
1328 s->cmdline_num = val;
1329 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1330 sizeof(s->map_pid_to_cmdline));
1331 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1332 val * sizeof(*s->map_cmdline_to_pid));
1333
1334 return 0;
1335}
1336
1337static int trace_create_savedcmd(void)
1338{
1339 int ret;
1340
Namhyung Kima6af8fb2014-06-10 16:11:35 +09001341 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001342 if (!savedcmd)
1343 return -ENOMEM;
1344
1345 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1346 if (ret < 0) {
1347 kfree(savedcmd);
1348 savedcmd = NULL;
1349 return -ENOMEM;
1350 }
1351
1352 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001353}
1354
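/*
 * Illustrative sketch (not part of trace.c): how the savedcmd layout
 * above fits together. Slot idx of the flat allocation starts at
 * saved_cmdlines[idx * TASK_COMM_LEN], and the two maps tie pids and
 * slots to each other. Assumes trace_cmdline_lock is held by the
 * caller.
 */
static void example_dump_savedcmd(struct saved_cmdlines_buffer *s)
{
	unsigned idx;

	for (idx = 0; idx < s->cmdline_num; idx++) {
		unsigned pid = s->map_cmdline_to_pid[idx];

		if (pid != NO_CMDLINE_MAP)	/* slot is in use */
			pr_info("slot %u: pid %u comm %.16s\n", idx, pid,
				&s->saved_cmdlines[idx * TASK_COMM_LEN]);
	}
}
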
Carsten Emdeb5130b12009-09-13 01:43:07 +02001355int is_tracing_stopped(void)
1356{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001357 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02001358}
1359
Steven Rostedt0f048702008-11-05 16:05:44 -05001360/**
1361 * tracing_start - quick start of the tracer
1362 *
1363 * If tracing is enabled but was stopped by tracing_stop,
1364 * this will start the tracer back up.
1365 */
1366void tracing_start(void)
1367{
1368 struct ring_buffer *buffer;
1369 unsigned long flags;
1370
1371 if (tracing_disabled)
1372 return;
1373
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001374 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1375 if (--global_trace.stop_count) {
1376 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05001377 /* Someone screwed up their debugging */
1378 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001379 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05001380 }
Steven Rostedt0f048702008-11-05 16:05:44 -05001381 goto out;
1382 }
1383
Steven Rostedta2f80712010-03-12 19:56:00 -05001384 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001385 arch_spin_lock(&global_trace.max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05001386
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001387 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001388 if (buffer)
1389 ring_buffer_record_enable(buffer);
1390
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001391#ifdef CONFIG_TRACER_MAX_TRACE
1392 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001393 if (buffer)
1394 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001395#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001396
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001397 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001398
Steven Rostedt0f048702008-11-05 16:05:44 -05001399 ftrace_start();
1400 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001401 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1402}
1403
1404static void tracing_start_tr(struct trace_array *tr)
1405{
1406 struct ring_buffer *buffer;
1407 unsigned long flags;
1408
1409 if (tracing_disabled)
1410 return;
1411
1412 /* If global, we need to also start the max tracer */
1413 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1414 return tracing_start();
1415
1416 raw_spin_lock_irqsave(&tr->start_lock, flags);
1417
1418 if (--tr->stop_count) {
1419 if (tr->stop_count < 0) {
1420 /* Someone screwed up their debugging */
1421 WARN_ON_ONCE(1);
1422 tr->stop_count = 0;
1423 }
1424 goto out;
1425 }
1426
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001427 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001428 if (buffer)
1429 ring_buffer_record_enable(buffer);
1430
1431 out:
1432 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001433}
1434
1435/**
1436 * tracing_stop - quick stop of the tracer
1437 *
1438 * Lightweight way to stop tracing. Use in conjunction with
1439 * tracing_start.
1440 */
1441void tracing_stop(void)
1442{
1443 struct ring_buffer *buffer;
1444 unsigned long flags;
1445
1446 ftrace_stop();
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001447 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1448 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05001449 goto out;
1450
Steven Rostedta2f80712010-03-12 19:56:00 -05001451 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001452 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001453
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001454 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001455 if (buffer)
1456 ring_buffer_record_disable(buffer);
1457
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001458#ifdef CONFIG_TRACER_MAX_TRACE
1459 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001460 if (buffer)
1461 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001462#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001463
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001464 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001465
Steven Rostedt0f048702008-11-05 16:05:44 -05001466 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001467 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1468}
1469
1470static void tracing_stop_tr(struct trace_array *tr)
1471{
1472 struct ring_buffer *buffer;
1473 unsigned long flags;
1474
1475 /* If global, we need to also stop the max tracer */
1476 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1477 return tracing_stop();
1478
1479 raw_spin_lock_irqsave(&tr->start_lock, flags);
1480 if (tr->stop_count++)
1481 goto out;
1482
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001483 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001484 if (buffer)
1485 ring_buffer_record_disable(buffer);
1486
1487 out:
1488 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001489}
1490
Ingo Molnare309b412008-05-12 21:20:51 +02001491void trace_stop_cmdline_recording(void);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001492
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001493static int trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001494{
Carsten Emdea635cf02009-03-18 09:00:41 +01001495 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001496
1497 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001498 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001499
1500 /*
1501 * It's not the end of the world if we don't get
1502 * the lock, but we also don't want to spin
1503 * nor do we want to disable interrupts,
1504 * so if we miss here, then better luck next time.
1505 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001506 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001507 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001508
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001509 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001510 if (idx == NO_CMDLINE_MAP) {
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001511 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001512
Carsten Emdea635cf02009-03-18 09:00:41 +01001513 /*
1514 * Check whether the cmdline buffer at idx has a pid
1515 * mapped. We are going to overwrite that entry so we
1516 * need to clear the map_pid_to_cmdline. Otherwise we
1517 * would read the new comm for the old pid.
1518 */
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001519 pid = savedcmd->map_cmdline_to_pid[idx];
Carsten Emdea635cf02009-03-18 09:00:41 +01001520 if (pid != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001521 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001522
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001523 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1524 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001525
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001526 savedcmd->cmdline_idx = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001527 }
1528
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001529 set_cmdline(idx, tsk->comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001530
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001531 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001532
1533 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001534}
1535
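/*
 * Illustrative walk-through (not part of trace.c): with the default
 * 128 slots, saving a 129th distinct pid reuses slot
 * (cmdline_idx + 1) % 128. If that slot still belongs to, say, old
 * pid 4242, then map_pid_to_cmdline[4242] is reset to NO_CMDLINE_MAP
 * before the slot is handed to the new pid, so a later lookup of 4242
 * falls back to "<...>" rather than returning the new task's comm.
 */
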
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001536static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001537{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001538 unsigned map;
1539
Steven Rostedt4ca53082009-03-16 19:20:15 -04001540 if (!pid) {
1541 strcpy(comm, "<idle>");
1542 return;
1543 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001544
Steven Rostedt74bf4072010-01-25 15:11:53 -05001545 if (WARN_ON_ONCE(pid < 0)) {
1546 strcpy(comm, "<XXX>");
1547 return;
1548 }
1549
Steven Rostedt4ca53082009-03-16 19:20:15 -04001550 if (pid > PID_MAX_DEFAULT) {
1551 strcpy(comm, "<...>");
1552 return;
1553 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001554
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001555 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001556 if (map != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001557 strcpy(comm, get_saved_cmdlines(map));
Thomas Gleixner50d88752009-03-18 08:58:44 +01001558 else
1559 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001560}
1561
1562void trace_find_cmdline(int pid, char comm[])
1563{
1564 preempt_disable();
1565 arch_spin_lock(&trace_cmdline_lock);
1566
1567 __trace_find_cmdline(pid, comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001568
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001569 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001570 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001571}
1572
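/*
 * Illustrative usage sketch (not part of trace.c): callers hand in a
 * TASK_COMM_LEN buffer and always get something printable back: the
 * saved comm, "<idle>" for pid 0, or the "<...>" placeholder.
 */
static void example_print_comm(int pid)
{
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(pid, comm);
	pr_info("pid %d -> %s\n", pid, comm);
}
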
Ingo Molnare309b412008-05-12 21:20:51 +02001573void tracing_record_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001574{
Steven Rostedt0fb96562012-05-11 14:25:30 -04001575 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001576 return;
1577
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001578 if (!__this_cpu_read(trace_cmdline_save))
1579 return;
1580
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001581 if (trace_save_cmdline(tsk))
1582 __this_cpu_write(trace_cmdline_save, false);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001583}
1584
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03001585void
Steven Rostedt38697052008-10-01 13:14:09 -04001586tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1587 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001588{
1589 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001590
Steven Rostedt777e2082008-09-29 23:02:42 -04001591 entry->preempt_count = pc & 0xff;
1592 entry->pid = (tsk) ? tsk->pid : 0;
1593 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04001594#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04001595 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04001596#else
1597 TRACE_FLAG_IRQS_NOSUPPORT |
1598#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001599 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1600 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02001601 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1602 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001603}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02001604EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001605
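/*
 * Illustrative sketch (not part of trace.c): unpacking the header
 * fields filled in above. The low byte of the preempt count is stored
 * as-is, while the irq/softirq/resched state is squeezed into the
 * flags byte.
 */
static void example_decode_entry(struct trace_entry *ent)
{
	pr_info("pid=%d preempt=%u irqs-off=%d hardirq=%d softirq=%d\n",
		ent->pid, ent->preempt_count,
		!!(ent->flags & TRACE_FLAG_IRQS_OFF),
		!!(ent->flags & TRACE_FLAG_HARDIRQ),
		!!(ent->flags & TRACE_FLAG_SOFTIRQ));
}
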
Steven Rostedte77405a2009-09-02 14:17:06 -04001606struct ring_buffer_event *
1607trace_buffer_lock_reserve(struct ring_buffer *buffer,
1608 int type,
1609 unsigned long len,
1610 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001611{
1612 struct ring_buffer_event *event;
1613
Steven Rostedte77405a2009-09-02 14:17:06 -04001614 event = ring_buffer_lock_reserve(buffer, len);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001615 if (event != NULL) {
1616 struct trace_entry *ent = ring_buffer_event_data(event);
1617
1618 tracing_generic_entry_update(ent, flags, pc);
1619 ent->type = type;
1620 }
1621
1622 return event;
1623}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001624
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001625void
1626__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1627{
1628 __this_cpu_write(trace_cmdline_save, true);
1629 ring_buffer_unlock_commit(buffer, event);
1630}
1631
Steven Rostedte77405a2009-09-02 14:17:06 -04001632static inline void
1633__trace_buffer_unlock_commit(struct ring_buffer *buffer,
1634 struct ring_buffer_event *event,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001635 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001636{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001637 __buffer_unlock_commit(buffer, event);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001638
Steven Rostedte77405a2009-09-02 14:17:06 -04001639 ftrace_trace_stack(buffer, flags, 6, pc);
1640 ftrace_trace_userstack(buffer, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001641}
1642
Steven Rostedte77405a2009-09-02 14:17:06 -04001643void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1644 struct ring_buffer_event *event,
1645 unsigned long flags, int pc)
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001646{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001647 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001648}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001649EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001650
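/*
 * Illustrative sketch (not part of trace.c): the reserve/fill/commit
 * pattern that the writers in this file follow. TRACE_EXAMPLE and
 * struct example_entry are hypothetical; a failed reserve simply
 * means the event is dropped.
 */
#define TRACE_EXAMPLE	64		/* hypothetical event type id */

struct example_entry {
	struct trace_entry ent;		/* common header, filled by reserve */
	unsigned long value;		/* hypothetical payload */
};

static void example_write_event(struct ring_buffer *buffer,
				unsigned long flags, int pc)
{
	struct ring_buffer_event *event;
	struct example_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_EXAMPLE,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;			/* ring buffer full or disabled */
	entry = ring_buffer_event_data(event);
	entry->value = 42;
	__buffer_unlock_commit(buffer, event);
}
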
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001651static struct ring_buffer *temp_buffer;
1652
Steven Rostedtef5580d2009-02-27 19:38:04 -05001653struct ring_buffer_event *
Steven Rostedtccb469a2012-08-02 10:32:10 -04001654trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1655 struct ftrace_event_file *ftrace_file,
1656 int type, unsigned long len,
1657 unsigned long flags, int pc)
1658{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001659 struct ring_buffer_event *entry;
1660
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001661 *current_rb = ftrace_file->tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001662 entry = trace_buffer_lock_reserve(*current_rb,
Steven Rostedtccb469a2012-08-02 10:32:10 -04001663 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001664 /*
1665 * If tracing is off, but we have triggers enabled
1666 * we still need to look at the event data. Use the temp_buffer
1667 * to store the trace event for the trigger to use. It's recursion
1668 * safe and will not be recorded anywhere.
1669 */
1670 if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
1671 *current_rb = temp_buffer;
1672 entry = trace_buffer_lock_reserve(*current_rb,
1673 type, len, flags, pc);
1674 }
1675 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04001676}
1677EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1678
1679struct ring_buffer_event *
Steven Rostedte77405a2009-09-02 14:17:06 -04001680trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1681 int type, unsigned long len,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001682 unsigned long flags, int pc)
1683{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001684 *current_rb = global_trace.trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04001685 return trace_buffer_lock_reserve(*current_rb,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001686 type, len, flags, pc);
1687}
Steven Rostedt94487d62009-05-05 19:22:53 -04001688EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001689
Steven Rostedte77405a2009-09-02 14:17:06 -04001690void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1691 struct ring_buffer_event *event,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001692 unsigned long flags, int pc)
1693{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001694 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001695}
Steven Rostedt94487d62009-05-05 19:22:53 -04001696EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001697
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001698void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1699 struct ring_buffer_event *event,
1700 unsigned long flags, int pc,
1701 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001702{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001703 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001704
1705 ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1706 ftrace_trace_userstack(buffer, flags, pc);
1707}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001708EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001709
Steven Rostedte77405a2009-09-02 14:17:06 -04001710void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1711 struct ring_buffer_event *event)
Steven Rostedt77d9f462009-04-02 01:16:59 -04001712{
Steven Rostedte77405a2009-09-02 14:17:06 -04001713 ring_buffer_discard_commit(buffer, event);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001714}
Steven Rostedt12acd472009-04-17 16:01:56 -04001715EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001716
Ingo Molnare309b412008-05-12 21:20:51 +02001717void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001718trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04001719 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1720 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001721{
Tom Zanussie1112b42009-03-31 00:48:49 -05001722 struct ftrace_event_call *call = &event_function;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001723 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001724 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001725 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001726
Steven Rostedtd7690412008-10-01 00:29:53 -04001727 /* If we are reading the ring buffer, don't trace */
Rusty Russelldd17c8f2009-10-29 22:34:15 +09001728 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
Steven Rostedtd7690412008-10-01 00:29:53 -04001729 return;
1730
Steven Rostedte77405a2009-09-02 14:17:06 -04001731 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001732 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001733 if (!event)
1734 return;
1735 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04001736 entry->ip = ip;
1737 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05001738
Tom Zanussif306cc82013-10-24 08:34:17 -05001739 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001740 __buffer_unlock_commit(buffer, event);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001741}
1742
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001743#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001744
1745#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1746struct ftrace_stack {
1747 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1748};
1749
1750static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1751static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1752
Steven Rostedte77405a2009-09-02 14:17:06 -04001753static void __ftrace_trace_stack(struct ring_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05001754 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001755 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02001756{
Tom Zanussie1112b42009-03-31 00:48:49 -05001757 struct ftrace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001758 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001759 struct stack_entry *entry;
Ingo Molnar86387f72008-05-12 21:20:51 +02001760 struct stack_trace trace;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001761 int use_stack;
1762 int size = FTRACE_STACK_ENTRIES;
Ingo Molnar86387f72008-05-12 21:20:51 +02001763
1764 trace.nr_entries = 0;
Ingo Molnar86387f72008-05-12 21:20:51 +02001765 trace.skip = skip;
Ingo Molnar86387f72008-05-12 21:20:51 +02001766
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001767 /*
1768 * Since events can happen in NMIs there's no safe way to
1769 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1770 * or NMI comes in, it will just have to use the default
1771 * FTRACE_STACK_ENTRIES.
1772 */
1773 preempt_disable_notrace();
1774
Shan Wei82146522012-11-19 13:21:01 +08001775 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001776 /*
1777 * We don't need any atomic variables, just a barrier.
1778 * If an interrupt comes in, we don't care, because it would
1779 * have exited and put the counter back to what we want.
1780 * We just need a barrier to keep gcc from moving things
1781 * around.
1782 */
1783 barrier();
1784 if (use_stack == 1) {
Christoph Lameterbdffd892014-04-29 14:17:40 -05001785 trace.entries = this_cpu_ptr(ftrace_stack.calls);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001786 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1787
1788 if (regs)
1789 save_stack_trace_regs(regs, &trace);
1790 else
1791 save_stack_trace(&trace);
1792
1793 if (trace.nr_entries > size)
1794 size = trace.nr_entries;
1795 } else
1796 /* From now on, use_stack is a boolean */
1797 use_stack = 0;
1798
1799 size *= sizeof(unsigned long);
1800
1801 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1802 sizeof(*entry) + size, flags, pc);
1803 if (!event)
1804 goto out;
1805 entry = ring_buffer_event_data(event);
1806
1807 memset(&entry->caller, 0, size);
1808
1809 if (use_stack)
1810 memcpy(&entry->caller, trace.entries,
1811 trace.nr_entries * sizeof(unsigned long));
1812 else {
1813 trace.max_entries = FTRACE_STACK_ENTRIES;
1814 trace.entries = entry->caller;
1815 if (regs)
1816 save_stack_trace_regs(regs, &trace);
1817 else
1818 save_stack_trace(&trace);
1819 }
1820
1821 entry->size = trace.nr_entries;
1822
Tom Zanussif306cc82013-10-24 08:34:17 -05001823 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001824 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001825
1826 out:
1827 /* Again, don't let gcc optimize things here */
1828 barrier();
Shan Wei82146522012-11-19 13:21:01 +08001829 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001830 preempt_enable_notrace();
1831
Ingo Molnarf0a920d2008-05-12 21:20:47 +02001832}
1833
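/*
 * Illustrative sketch (not part of trace.c): the per-cpu reservation
 * trick used above, in isolation. The first (outermost) user on a CPU
 * sees a count of 1 and may use the large per-cpu scratch stack; a
 * nested irq/NMI user sees > 1 and must fall back to the small
 * on-event stack. Assumes preemption is disabled around the pair.
 */
static DEFINE_PER_CPU(int, example_reserve);

static bool example_try_reserve_stack(void)
{
	return __this_cpu_inc_return(example_reserve) == 1;
}

static void example_release_stack(void)
{
	barrier();		/* keep gcc from reordering the decrement */
	__this_cpu_dec(example_reserve);
}
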
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001834void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1835 int skip, int pc, struct pt_regs *regs)
1836{
1837 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1838 return;
1839
1840 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1841}
1842
Steven Rostedte77405a2009-09-02 14:17:06 -04001843void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1844 int skip, int pc)
Steven Rostedt53614992009-01-15 19:12:40 -05001845{
1846 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1847 return;
1848
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001849 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
Steven Rostedt53614992009-01-15 19:12:40 -05001850}
1851
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001852void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1853 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04001854{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001855 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
Steven Rostedt38697052008-10-01 13:14:09 -04001856}
1857
Steven Rostedt03889382009-12-11 09:48:22 -05001858/**
1859 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001860 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05001861 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001862void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05001863{
1864 unsigned long flags;
1865
1866 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05001867 return;
Steven Rostedt03889382009-12-11 09:48:22 -05001868
1869 local_save_flags(flags);
1870
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001871 /*
1872 * Skip 3 more, seems to get us at the caller of
1873 * this function.
1874 */
1875 skip += 3;
1876 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1877 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05001878}
1879
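/*
 * Illustrative usage sketch (not part of trace.c): dropping a kernel
 * backtrace into the trace buffer from a code path under suspicion;
 * skip = 0 starts the trace at the caller of trace_dump_stack().
 */
static void example_suspicious_path(void)
{
	trace_dump_stack(0);
}
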
Steven Rostedt91e86e52010-11-10 12:56:12 +01001880static DEFINE_PER_CPU(int, user_stack_count);
1881
Steven Rostedte77405a2009-09-02 14:17:06 -04001882void
1883ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02001884{
Tom Zanussie1112b42009-03-31 00:48:49 -05001885 struct ftrace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02001886 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02001887 struct userstack_entry *entry;
1888 struct stack_trace trace;
Török Edwin02b67512008-11-22 13:28:47 +02001889
1890 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1891 return;
1892
Steven Rostedtb6345872010-03-12 20:03:30 -05001893 /*
1894 * NMIs cannot handle page faults, even with fixups.
1895 * Saving the user stack can (and often does) fault.
1896 */
1897 if (unlikely(in_nmi()))
1898 return;
1899
Steven Rostedt91e86e52010-11-10 12:56:12 +01001900 /*
1901 * prevent recursion, since the user stack tracing may
1902 * trigger other kernel events.
1903 */
1904 preempt_disable();
1905 if (__this_cpu_read(user_stack_count))
1906 goto out;
1907
1908 __this_cpu_inc(user_stack_count);
1909
Steven Rostedte77405a2009-09-02 14:17:06 -04001910 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001911 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02001912 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08001913 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02001914 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02001915
Steven Rostedt48659d32009-09-11 11:36:23 -04001916 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02001917 memset(&entry->caller, 0, sizeof(entry->caller));
1918
1919 trace.nr_entries = 0;
1920 trace.max_entries = FTRACE_STACK_ENTRIES;
1921 trace.skip = 0;
1922 trace.entries = entry->caller;
1923
1924 save_stack_trace_user(&trace);
Tom Zanussif306cc82013-10-24 08:34:17 -05001925 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001926 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001927
Li Zefan1dbd1952010-12-09 15:47:56 +08001928 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01001929 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001930 out:
1931 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02001932}
1933
Hannes Eder4fd27352009-02-10 19:44:12 +01001934#ifdef UNUSED
1935static void __trace_userstack(struct trace_array *tr, unsigned long flags)
Török Edwin02b67512008-11-22 13:28:47 +02001936{
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001937 ftrace_trace_userstack(tr, flags, preempt_count());
Török Edwin02b67512008-11-22 13:28:47 +02001938}
Hannes Eder4fd27352009-02-10 19:44:12 +01001939#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02001940
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001941#endif /* CONFIG_STACKTRACE */
1942
Steven Rostedt07d777f2011-09-22 14:01:55 -04001943/* created for use with alloc_percpu */
1944struct trace_buffer_struct {
1945 char buffer[TRACE_BUF_SIZE];
1946};
1947
1948static struct trace_buffer_struct *trace_percpu_buffer;
1949static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1950static struct trace_buffer_struct *trace_percpu_irq_buffer;
1951static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1952
1953/*
1954 * The buffer used is dependent on the context. There is a per cpu
1955 * buffer for normal context, softirq context, hard irq context and
1956 * for NMI context. This allows for lockless recording.
1957 *
1958 * Note, if the buffers failed to be allocated, then this returns NULL
1959 */
1960static char *get_trace_buf(void)
1961{
1962 struct trace_buffer_struct *percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04001963
1964 /*
1965 * If we have allocated per cpu buffers, then we do not
1966 * need to do any locking.
1967 */
1968 if (in_nmi())
1969 percpu_buffer = trace_percpu_nmi_buffer;
1970 else if (in_irq())
1971 percpu_buffer = trace_percpu_irq_buffer;
1972 else if (in_softirq())
1973 percpu_buffer = trace_percpu_sirq_buffer;
1974 else
1975 percpu_buffer = trace_percpu_buffer;
1976
1977 if (!percpu_buffer)
1978 return NULL;
1979
Shan Weid8a03492012-11-13 09:53:04 +08001980 return this_cpu_ptr(&percpu_buffer->buffer[0]);
Steven Rostedt07d777f2011-09-22 14:01:55 -04001981}
1982
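/*
 * Illustrative sketch (not part of trace.c): the same context test
 * order as get_trace_buf() above, shown as a plain classifier. NMI
 * must be checked first since in_irq()/in_softirq() may also be true
 * while an NMI is executing.
 */
static const char *example_context_name(void)
{
	if (in_nmi())
		return "nmi";
	if (in_irq())
		return "hardirq";
	if (in_softirq())
		return "softirq";
	return "normal";
}
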
1983static int alloc_percpu_trace_buffer(void)
1984{
1985 struct trace_buffer_struct *buffers;
1986 struct trace_buffer_struct *sirq_buffers;
1987 struct trace_buffer_struct *irq_buffers;
1988 struct trace_buffer_struct *nmi_buffers;
1989
1990 buffers = alloc_percpu(struct trace_buffer_struct);
1991 if (!buffers)
1992 goto err_warn;
1993
1994 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1995 if (!sirq_buffers)
1996 goto err_sirq;
1997
1998 irq_buffers = alloc_percpu(struct trace_buffer_struct);
1999 if (!irq_buffers)
2000 goto err_irq;
2001
2002 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
2003 if (!nmi_buffers)
2004 goto err_nmi;
2005
2006 trace_percpu_buffer = buffers;
2007 trace_percpu_sirq_buffer = sirq_buffers;
2008 trace_percpu_irq_buffer = irq_buffers;
2009 trace_percpu_nmi_buffer = nmi_buffers;
2010
2011 return 0;
2012
2013 err_nmi:
2014 free_percpu(irq_buffers);
2015 err_irq:
2016 free_percpu(sirq_buffers);
2017 err_sirq:
2018 free_percpu(buffers);
2019 err_warn:
2020 WARN(1, "Could not allocate percpu trace_printk buffer");
2021 return -ENOMEM;
2022}
2023
Steven Rostedt81698832012-10-11 10:15:05 -04002024static int buffers_allocated;
2025
Steven Rostedt07d777f2011-09-22 14:01:55 -04002026void trace_printk_init_buffers(void)
2027{
Steven Rostedt07d777f2011-09-22 14:01:55 -04002028 if (buffers_allocated)
2029 return;
2030
2031 if (alloc_percpu_trace_buffer())
2032 return;
2033
Steven Rostedt2184db42014-05-28 13:14:40 -04002034 /* trace_printk() is for debug use only. Don't use it in production. */
2035
2036 pr_warning("\n**********************************************************\n");
2037 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2038 pr_warning("** **\n");
2039 pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
2040 pr_warning("** **\n");
2041 pr_warning("** This means that this is a DEBUG kernel and it is **\n");
2042 pr_warning("** unsafe for production use. **\n");
2043 pr_warning("** **\n");
2044 pr_warning("** If you see this message and you are not debugging **\n");
2045 pr_warning("** the kernel, report this immediately to your vendor! **\n");
2046 pr_warning("** **\n");
2047 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2048 pr_warning("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04002049
Steven Rostedtb382ede62012-10-10 21:44:34 -04002050 /* Expand the buffers to set size */
2051 tracing_update_buffers();
2052
Steven Rostedt07d777f2011-09-22 14:01:55 -04002053 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04002054
2055 /*
2056 * trace_printk_init_buffers() can be called by modules.
2057 * If that happens, then we need to start cmdline recording
2058 * directly here. If the global_trace.buffer is already
2059 * allocated here, then this was called by module code.
2060 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002061 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04002062 tracing_start_cmdline_record();
2063}
2064
2065void trace_printk_start_comm(void)
2066{
2067 /* Start tracing comms if trace printk is set */
2068 if (!buffers_allocated)
2069 return;
2070 tracing_start_cmdline_record();
2071}
2072
2073static void trace_printk_start_stop_comm(int enabled)
2074{
2075 if (!buffers_allocated)
2076 return;
2077
2078 if (enabled)
2079 tracing_start_cmdline_record();
2080 else
2081 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002082}
2083
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002084/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002085 * trace_vbprintk - write binary msg to tracing buffer
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002086 *
2087 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04002088int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002089{
Tom Zanussie1112b42009-03-31 00:48:49 -05002090 struct ftrace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002091 struct ring_buffer_event *event;
Steven Rostedte77405a2009-09-02 14:17:06 -04002092 struct ring_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002093 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002094 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002095 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002096 char *tbuffer;
2097 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002098
2099 if (unlikely(tracing_selftest_running || tracing_disabled))
2100 return 0;
2101
2102 /* Don't pollute graph traces with trace_vprintk internals */
2103 pause_graph_tracing();
2104
2105 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04002106 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002107
Steven Rostedt07d777f2011-09-22 14:01:55 -04002108 tbuffer = get_trace_buf();
2109 if (!tbuffer) {
2110 len = 0;
2111 goto out;
2112 }
2113
2114 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2115
2116 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002117 goto out;
2118
Steven Rostedt07d777f2011-09-22 14:01:55 -04002119 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002120 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002121 buffer = tr->trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04002122 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2123 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002124 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002125 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002126 entry = ring_buffer_event_data(event);
2127 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002128 entry->fmt = fmt;
2129
Steven Rostedt07d777f2011-09-22 14:01:55 -04002130 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05002131 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002132 __buffer_unlock_commit(buffer, event);
Steven Rostedtd9313692010-01-06 17:27:11 -05002133 ftrace_trace_stack(buffer, flags, 6, pc);
2134 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002135
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002136out:
Steven Rostedt5168ae52010-06-03 09:36:50 -04002137 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002138 unpause_graph_tracing();
2139
2140 return len;
2141}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002142EXPORT_SYMBOL_GPL(trace_vbprintk);
2143
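/*
 * Illustrative sketch (not part of trace.c): a hypothetical varargs
 * wrapper around trace_vbprintk(). Only the fmt pointer and the
 * vbin_printf-packed argument words are recorded; the expensive
 * string formatting is deferred until the buffer is read.
 */
static int example_bprintk(unsigned long ip, const char *fmt, ...)
{
	va_list ap;
	int len;

	va_start(ap, fmt);
	len = trace_vbprintk(ip, fmt, ap);	/* packs args, no vsnprintf */
	va_end(ap);
	return len;
}
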
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002144static int
2145__trace_array_vprintk(struct ring_buffer *buffer,
2146 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002147{
Tom Zanussie1112b42009-03-31 00:48:49 -05002148 struct ftrace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002149 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002150 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002151 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002152 unsigned long flags;
2153 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002154
2155 if (tracing_disabled || tracing_selftest_running)
2156 return 0;
2157
Steven Rostedt07d777f2011-09-22 14:01:55 -04002158 /* Don't pollute graph traces with trace_vprintk internals */
2159 pause_graph_tracing();
2160
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002161 pc = preempt_count();
2162 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002163
Steven Rostedt07d777f2011-09-22 14:01:55 -04002164
2165 tbuffer = get_trace_buf();
2166 if (!tbuffer) {
2167 len = 0;
2168 goto out;
2169 }
2170
2171 len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2172 if (len > TRACE_BUF_SIZE)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002173 goto out;
2174
Steven Rostedt07d777f2011-09-22 14:01:55 -04002175 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002176 size = sizeof(*entry) + len + 1;
Steven Rostedte77405a2009-09-02 14:17:06 -04002177 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
Steven Rostedt07d777f2011-09-22 14:01:55 -04002178 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002179 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002180 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002181 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002182 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002183
Steven Rostedt07d777f2011-09-22 14:01:55 -04002184 memcpy(&entry->buf, tbuffer, len);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002185 entry->buf[len] = '\0';
Tom Zanussif306cc82013-10-24 08:34:17 -05002186 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002187 __buffer_unlock_commit(buffer, event);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002188 ftrace_trace_stack(buffer, flags, 6, pc);
Steven Rostedtd9313692010-01-06 17:27:11 -05002189 }
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002190 out:
2191 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002192 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002193
2194 return len;
2195}
Steven Rostedt659372d2009-09-03 19:11:07 -04002196
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002197int trace_array_vprintk(struct trace_array *tr,
2198 unsigned long ip, const char *fmt, va_list args)
2199{
2200 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2201}
2202
2203int trace_array_printk(struct trace_array *tr,
2204 unsigned long ip, const char *fmt, ...)
2205{
2206 int ret;
2207 va_list ap;
2208
2209 if (!(trace_flags & TRACE_ITER_PRINTK))
2210 return 0;
2211
2212 va_start(ap, fmt);
2213 ret = trace_array_vprintk(tr, ip, fmt, ap);
2214 va_end(ap);
2215 return ret;
2216}
2217
2218int trace_array_printk_buf(struct ring_buffer *buffer,
2219 unsigned long ip, const char *fmt, ...)
2220{
2221 int ret;
2222 va_list ap;
2223
2224 if (!(trace_flags & TRACE_ITER_PRINTK))
2225 return 0;
2226
2227 va_start(ap, fmt);
2228 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2229 va_end(ap);
2230 return ret;
2231}
2232
Steven Rostedt659372d2009-09-03 19:11:07 -04002233int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2234{
Steven Rostedta813a152009-10-09 01:41:35 -04002235 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04002236}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002237EXPORT_SYMBOL_GPL(trace_vprintk);
2238
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002239static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04002240{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002241 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2242
Steven Rostedt5a90f572008-09-03 17:42:51 -04002243 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002244 if (buf_iter)
2245 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04002246}
2247
Ingo Molnare309b412008-05-12 21:20:51 +02002248static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002249peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2250 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002251{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002252 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002253 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002254
Steven Rostedtd7690412008-10-01 00:29:53 -04002255 if (buf_iter)
2256 event = ring_buffer_iter_peek(buf_iter, ts);
2257 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002258 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002259 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04002260
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002261 if (event) {
2262 iter->ent_size = ring_buffer_event_length(event);
2263 return ring_buffer_event_data(event);
2264 }
2265 iter->ent_size = 0;
2266 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002267}
Steven Rostedtd7690412008-10-01 00:29:53 -04002268
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002269static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002270__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2271 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002272{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002273 struct ring_buffer *buffer = iter->trace_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002274 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08002275 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002276 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002277 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002278 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002279 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002280 int cpu;
2281
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002282 /*
2283 * If we are in a per_cpu trace file, don't bother iterating over
2284 * all cpus; just peek at that one cpu directly.
2285 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002286 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002287 if (ring_buffer_empty_cpu(buffer, cpu_file))
2288 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002289 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002290 if (ent_cpu)
2291 *ent_cpu = cpu_file;
2292
2293 return ent;
2294 }
2295
Steven Rostedtab464282008-05-12 21:21:00 +02002296 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002297
2298 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002299 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002300
Steven Rostedtbc21b472010-03-31 19:49:26 -04002301 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002302
Ingo Molnarcdd31cd2008-05-12 21:20:46 +02002303 /*
2304 * Pick the entry with the smallest timestamp:
2305 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002306 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002307 next = ent;
2308 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002309 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002310 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002311 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002312 }
2313 }
2314
Steven Rostedt12b5da32012-03-27 10:43:28 -04002315 iter->ent_size = next_size;
2316
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002317 if (ent_cpu)
2318 *ent_cpu = next_cpu;
2319
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002320 if (ent_ts)
2321 *ent_ts = next_ts;
2322
Steven Rostedtbc21b472010-03-31 19:49:26 -04002323 if (missing_events)
2324 *missing_events = next_lost;
2325
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002326 return next;
2327}
2328
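/*
 * Illustrative sketch (not part of trace.c): __find_next_entry() is a
 * k-way merge by timestamp over the per-cpu buffers. The same idea on
 * a plain array, assuming a hypothetical peek_ts() that returns 0 for
 * an empty cpu buffer:
 */
static int example_earliest_cpu(u64 (*peek_ts)(int cpu), int ncpus,
				u64 *ent_ts)
{
	u64 ts, next_ts = 0;
	int cpu, next_cpu = -1;

	for (cpu = 0; cpu < ncpus; cpu++) {
		ts = peek_ts(cpu);
		if (!ts)
			continue;		/* nothing buffered here */
		/* pick the entry with the smallest timestamp */
		if (next_cpu < 0 || ts < next_ts) {
			next_cpu = cpu;
			next_ts = ts;
		}
	}
	if (next_cpu >= 0)
		*ent_ts = next_ts;
	return next_cpu;			/* -1 when all are empty */
}
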
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002329/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002330struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2331 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002332{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002333 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002334}
Ingo Molnar8c523a92008-05-12 21:20:46 +02002335
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002336/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05002337void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002338{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002339 iter->ent = __find_next_entry(iter, &iter->cpu,
2340 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02002341
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002342 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002343 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002344
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002345 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02002346}
2347
Ingo Molnare309b412008-05-12 21:20:51 +02002348static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002349{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002350 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002351 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002352}
2353
Ingo Molnare309b412008-05-12 21:20:51 +02002354static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002355{
2356 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002357 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002358 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002359
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002360 WARN_ON_ONCE(iter->leftover);
2361
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002362 (*pos)++;
2363
2364 /* can't go backwards */
2365 if (iter->idx > i)
2366 return NULL;
2367
2368 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05002369 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002370 else
2371 ent = iter;
2372
2373 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05002374 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002375
2376 iter->pos = *pos;
2377
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002378 return ent;
2379}
2380
Jason Wessel955b61e2010-08-05 09:22:23 -05002381void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002382{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002383 struct ring_buffer_event *event;
2384 struct ring_buffer_iter *buf_iter;
2385 unsigned long entries = 0;
2386 u64 ts;
2387
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002388 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002389
Steven Rostedt6d158a82012-06-27 20:46:14 -04002390 buf_iter = trace_buffer_iter(iter, cpu);
2391 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002392 return;
2393
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002394 ring_buffer_iter_reset(buf_iter);
2395
2396 /*
2397 * We could have the case with the max latency tracers
2398 * that a reset never took place on a cpu. This is evidenced
2399 * by the timestamp being before the start of the buffer.
2400 */
2401 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002402 if (ts >= iter->trace_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002403 break;
2404 entries++;
2405 ring_buffer_read(buf_iter, NULL);
2406 }
2407
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002408 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002409}
2410
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002411/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002412 * The current tracer is copied to avoid global locking
2413 * all around.
2414 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002415static void *s_start(struct seq_file *m, loff_t *pos)
2416{
2417 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002418 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002419 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002420 void *p = NULL;
2421 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002422 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002423
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09002424 /*
2425 * copy the tracer to avoid using a global lock all around.
2426 * iter->trace is a copy of current_trace, the pointer to the
2427 * name may be used instead of a strcmp(), as iter->trace->name
2428 * will point to the same string as current_trace->name.
2429 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002430 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002431 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2432 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002433 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002434
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002435#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002436 if (iter->snapshot && iter->trace->use_max_tr)
2437 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002438#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002439
2440 if (!iter->snapshot)
2441 atomic_inc(&trace_record_cmdline_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002442
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002443 if (*pos != iter->pos) {
2444 iter->ent = NULL;
2445 iter->cpu = 0;
2446 iter->idx = -1;
2447
Steven Rostedtae3b5092013-01-23 15:22:59 -05002448 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002449 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002450 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002451 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002452 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002453
Lai Jiangshanac91d852010-03-02 17:54:50 +08002454 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002455 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2456 ;
2457
2458 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002459 /*
2460 * If we overflowed the seq_file before, then we want
2461 * to just reuse the trace_seq buffer again.
2462 */
2463 if (iter->leftover)
2464 p = iter;
2465 else {
2466 l = *pos - 1;
2467 p = s_next(m, p, &l);
2468 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002469 }
2470
Lai Jiangshan4f535962009-05-18 19:35:34 +08002471 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002472 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002473 return p;
2474}
2475
2476static void s_stop(struct seq_file *m, void *p)
2477{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002478 struct trace_iterator *iter = m->private;
2479
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002480#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002481 if (iter->snapshot && iter->trace->use_max_tr)
2482 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002483#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002484
2485 if (!iter->snapshot)
2486 atomic_dec(&trace_record_cmdline_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002487
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002488 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08002489 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002490}
2491
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002492static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002493get_total_entries(struct trace_buffer *buf,
2494 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002495{
2496 unsigned long count;
2497 int cpu;
2498
2499 *total = 0;
2500 *entries = 0;
2501
2502 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002503 count = ring_buffer_entries_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002504 /*
2505 * If this buffer has skipped entries, then we hold all
2506 * entries for the trace and we need to ignore the
2507 * ones before the time stamp.
2508 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002509 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2510 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002511 /* total is the same as the entries */
2512 *total += count;
2513 } else
2514 *total += count +
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002515 ring_buffer_overrun_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002516 *entries += count;
2517 }
2518}
2519
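/*
 * Illustrative worked example (not part of trace.c): on a cpu with no
 * skipped entries, 1000 readable entries plus 200 overrun (overwritten)
 * events contribute total = 1200 and entries = 1000. On a cpu where a
 * latency snapshot skipped 50 stale entries, those 50 are deducted
 * from the count and the overrun is ignored, so that cpu contributes
 * equally to both total and entries.
 */
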
Ingo Molnare309b412008-05-12 21:20:51 +02002520static void print_lat_help_header(struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002521{
Michael Ellermana6168352008-08-20 16:36:11 -07002522 seq_puts(m, "#                  _------=> CPU#            \n");
2523 seq_puts(m, "#                 / _-----=> irqs-off        \n");
2524 seq_puts(m, "#                | / _----=> need-resched    \n");
2525 seq_puts(m, "#                || / _---=> hardirq/softirq \n");
2526 seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
Steven Rostedte6e1e252011-03-09 10:41:56 -05002527 seq_puts(m, "#                |||| /     delay             \n");
2528 seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
2529 seq_puts(m, "#     \\   /      |||||  \\    |   /           \n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002530}
2531
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002532static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002533{
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002534 unsigned long total;
2535 unsigned long entries;
2536
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002537 get_total_entries(buf, &total, &entries);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002538 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2539 entries, total, num_online_cpus());
2540 seq_puts(m, "#\n");
2541}
2542
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002543static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002544{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002545 print_event_info(buf, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002546 seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n");
Michael Ellermana6168352008-08-20 16:36:11 -07002547 seq_puts(m, "#              | |       |          |         |\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002548}
2549
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002550static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt77271ce2011-11-17 09:34:33 -05002551{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002552 print_event_info(buf, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002553 seq_puts(m, "#                              _-----=> irqs-off\n");
2554 seq_puts(m, "#                             / _----=> need-resched\n");
2555 seq_puts(m, "#                            | / _---=> hardirq/softirq\n");
2556 seq_puts(m, "#                            || / _--=> preempt-depth\n");
2557 seq_puts(m, "#                            ||| /     delay\n");
2558 seq_puts(m, "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
2559 seq_puts(m, "#              | |       |   ||||       |         |\n");
2560}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002561
Jiri Olsa62b915f2010-04-02 19:01:22 +02002562void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002563print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2564{
2565 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002566 struct trace_buffer *buf = iter->trace_buffer;
2567 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002568 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002569 unsigned long entries;
2570 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002571 const char *name = "preemption";
2572
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05002573 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002574
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002575 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002576
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002577 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002578 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002579 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002580 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002581 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002582 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02002583 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002584 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02002585 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002586 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002587#if defined(CONFIG_PREEMPT_NONE)
2588 "server",
2589#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2590 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04002591#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002592 "preempt",
2593#else
2594 "unknown",
2595#endif
2596 /* These are reserved for later use */
2597 0, 0, 0, 0);
2598#ifdef CONFIG_SMP
2599 seq_printf(m, " #P:%d)\n", num_online_cpus());
2600#else
2601 seq_puts(m, ")\n");
2602#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002603 seq_puts(m, "# -----------------\n");
2604 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002605 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07002606 data->comm, data->pid,
2607 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002608 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002609 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002610
2611 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002612 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002613 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2614 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002615 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002616 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2617 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04002618 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002619 }
2620
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002621 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002622}
2623
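/*
 * If the trace hit ring buffer overruns (TRACE_FILE_ANNOTATE), emit
 * a "##### CPU n buffer started ####" marker the first time each
 * CPU's entries appear, so the reader can tell where that per-CPU
 * buffer begins.  Skipped for the very first entry and for CPUs
 * whose skipped entries are already accounted for.
 */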
Steven Rostedta3097202008-11-07 22:36:02 -05002624static void test_cpu_buff_start(struct trace_iterator *iter)
2625{
2626 struct trace_seq *s = &iter->seq;
2627
Steven Rostedt12ef7d42008-11-12 17:52:38 -05002628 if (!(trace_flags & TRACE_ITER_ANNOTATE))
2629 return;
2630
2631 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2632 return;
2633
Rusty Russell44623442009-01-01 10:12:23 +10302634 if (cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05002635 return;
2636
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002637 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002638 return;
2639
Rusty Russell44623442009-01-01 10:12:23 +10302640 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02002641
2642 /* Don't print started cpu buffer for the first entry of the trace */
2643 if (iter->idx > 1)
2644 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2645 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05002646}
2647
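/*
 * Default human-readable output: print the context columns, then
 * hand the entry to the trace_event registered for its type; an
 * unknown type is reported rather than silently dropped.
 */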
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002648static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002649{
Steven Rostedt214023c2008-05-12 21:20:46 +02002650 struct trace_seq *s = &iter->seq;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002651 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002652 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002653 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002654
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002655 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002656
Steven Rostedta3097202008-11-07 22:36:02 -05002657 test_cpu_buff_start(iter);
2658
Steven Rostedtf633cef2008-12-23 23:24:13 -05002659 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002660
2661 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt27d48be2009-03-04 21:57:29 -05002662 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2663 if (!trace_print_lat_context(iter))
2664 goto partial;
2665 } else {
2666 if (!trace_print_context(iter))
2667 goto partial;
2668 }
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002669 }
2670
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002671 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04002672 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002673
2674 if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
2675 goto partial;
Steven Rostedt7104f302008-10-01 10:52:51 -04002676
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002677 return TRACE_TYPE_HANDLED;
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002678partial:
2679 return TRACE_TYPE_PARTIAL_LINE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002680}
2681
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002682static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002683{
2684 struct trace_seq *s = &iter->seq;
2685 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002686 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002687
2688 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002689
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002690 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002691 if (!trace_seq_printf(s, "%d %d %llu ",
2692 entry->pid, iter->cpu, iter->ts))
2693 goto partial;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002694 }
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002695
Steven Rostedtf633cef2008-12-23 23:24:13 -05002696 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002697 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04002698 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002699
2700 if (!trace_seq_printf(s, "%d ?\n", entry->type))
2701 goto partial;
Steven Rostedt7104f302008-10-01 10:52:51 -04002702
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002703 return TRACE_TYPE_HANDLED;
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002704partial:
2705 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002706}
2707
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002708static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002709{
2710 struct trace_seq *s = &iter->seq;
2711 unsigned char newline = '\n';
2712 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002713 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002714
2715 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002716
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002717 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2718 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
2719 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
2720 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
2721 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002722
Steven Rostedtf633cef2008-12-23 23:24:13 -05002723 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002724 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04002725 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002726 if (ret != TRACE_TYPE_HANDLED)
2727 return ret;
2728 }
Steven Rostedt7104f302008-10-01 10:52:51 -04002729
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002730 SEQ_PUT_FIELD_RET(s, newline);
2731
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002732 return TRACE_TYPE_HANDLED;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002733}
2734
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002735static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002736{
2737 struct trace_seq *s = &iter->seq;
2738 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002739 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002740
2741 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002742
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002743 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2744 SEQ_PUT_FIELD_RET(s, entry->pid);
Steven Rostedt1830b52d2009-02-07 19:38:43 -05002745 SEQ_PUT_FIELD_RET(s, iter->cpu);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002746 SEQ_PUT_FIELD_RET(s, iter->ts);
2747 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002748
Steven Rostedtf633cef2008-12-23 23:24:13 -05002749 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04002750 return event ? event->funcs->binary(iter, 0, event) :
2751 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002752}
2753
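/*
 * Return 1 when there is nothing left to read, checking either the
 * single CPU this iterator is bound to or all tracing CPUs.  A
 * per-CPU buffer iterator is consulted when one was prepared;
 * otherwise the live ring buffer is queried directly.
 */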
Jiri Olsa62b915f2010-04-02 19:01:22 +02002754int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002755{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002756 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002757 int cpu;
2758
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002759 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002760 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002761 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002762 buf_iter = trace_buffer_iter(iter, cpu);
2763 if (buf_iter) {
2764 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002765 return 0;
2766 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002767 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002768 return 0;
2769 }
2770 return 1;
2771 }
2772
Steven Rostedtab464282008-05-12 21:21:00 +02002773 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04002774 buf_iter = trace_buffer_iter(iter, cpu);
2775 if (buf_iter) {
2776 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04002777 return 0;
2778 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002779 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04002780 return 0;
2781 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002782 }
Steven Rostedtd7690412008-10-01 00:29:53 -04002783
Frederic Weisbecker797d3712008-09-30 18:13:45 +02002784 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002785}
2786
Lai Jiangshan4f535962009-05-18 19:35:34 +08002787/* Called with trace_event_read_lock() held. */
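/*
 * Dispatch order: lost-event notices first, then the current
 * tracer's own print_line(), then the printk-msg-only special cases
 * (bputs/bprint/print), then the bin/hex/raw output options, and
 * finally the default formatter.
 */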
Jason Wessel955b61e2010-08-05 09:22:23 -05002788enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002789{
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002790 enum print_line_t ret;
2791
Jiri Olsaee5e51f2011-03-25 12:05:18 +01002792 if (iter->lost_events &&
2793 !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2794 iter->cpu, iter->lost_events))
2795 return TRACE_TYPE_PARTIAL_LINE;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002796
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002797 if (iter->trace && iter->trace->print_line) {
2798 ret = iter->trace->print_line(iter);
2799 if (ret != TRACE_TYPE_UNHANDLED)
2800 return ret;
2801 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02002802
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05002803 if (iter->ent->type == TRACE_BPUTS &&
2804 trace_flags & TRACE_ITER_PRINTK &&
2805 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2806 return trace_print_bputs_msg_only(iter);
2807
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002808 if (iter->ent->type == TRACE_BPRINT &&
2809 trace_flags & TRACE_ITER_PRINTK &&
2810 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04002811 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002812
Frederic Weisbecker66896a82008-12-13 20:18:13 +01002813 if (iter->ent->type == TRACE_PRINT &&
2814 trace_flags & TRACE_ITER_PRINTK &&
2815 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04002816 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01002817
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002818 if (trace_flags & TRACE_ITER_BIN)
2819 return print_bin_fmt(iter);
2820
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002821 if (trace_flags & TRACE_ITER_HEX)
2822 return print_hex_fmt(iter);
2823
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002824 if (trace_flags & TRACE_ITER_RAW)
2825 return print_raw_fmt(iter);
2826
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002827 return print_trace_fmt(iter);
2828}
2829
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01002830void trace_latency_header(struct seq_file *m)
2831{
2832 struct trace_iterator *iter = m->private;
2833
2834 /* print nothing if the buffers are empty */
2835 if (trace_empty(iter))
2836 return;
2837
2838 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2839 print_trace_header(m, iter);
2840
2841 if (!(trace_flags & TRACE_ITER_VERBOSE))
2842 print_lat_help_header(m);
2843}
2844
Jiri Olsa62b915f2010-04-02 19:01:22 +02002845void trace_default_header(struct seq_file *m)
2846{
2847 struct trace_iterator *iter = m->private;
2848
Jiri Olsaf56e7f82011-06-03 16:58:49 +02002849 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2850 return;
2851
Jiri Olsa62b915f2010-04-02 19:01:22 +02002852 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2853 /* print nothing if the buffers are empty */
2854 if (trace_empty(iter))
2855 return;
2856 print_trace_header(m, iter);
2857 if (!(trace_flags & TRACE_ITER_VERBOSE))
2858 print_lat_help_header(m);
2859 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05002860 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2861 if (trace_flags & TRACE_ITER_IRQ_INFO)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002862 print_func_help_header_irq(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002863 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002864 print_func_help_header(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002865 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02002866 }
2867}
2868
Steven Rostedte0a413f2011-09-29 21:26:16 -04002869static void test_ftrace_alive(struct seq_file *m)
2870{
2871 if (!ftrace_is_dead())
2872 return;
2873 seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
2874 seq_printf(m, "#          MAY BE MISSING FUNCTION EVENTS\n");
2875}
2876
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002877#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002878static void show_snapshot_main_help(struct seq_file *m)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002879{
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002880 seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
2881 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2882 seq_printf(m, "# Takes a snapshot of the main buffer.\n");
Wang YanQingb9be6d02013-09-14 12:59:16 +08002883 seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002884 seq_printf(m, "# (Doesn't have to be '2'; works with any number that\n");
2885 seq_printf(m, "# is not a '0' or '1')\n");
2886}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002887
2888static void show_snapshot_percpu_help(struct seq_file *m)
2889{
2890 seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2891#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2892 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2893 seq_printf(m, "# Takes a snapshot of the main buffer for this cpu.\n");
2894#else
2895 seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
2896 seq_printf(m, "# Must use main snapshot file to allocate.\n");
2897#endif
2898 seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
2899 seq_printf(m, "# (Doesn't have to be '2'; works with any number that\n");
2900 seq_printf(m, "# is not a '0' or '1')\n");
2901}
2902
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002903static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2904{
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05002905 if (iter->tr->allocated_snapshot)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002906 seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
2907 else
2908 seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
2909
2910 seq_printf(m, "# Snapshot commands:\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002911 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2912 show_snapshot_main_help(m);
2913 else
2914 show_snapshot_percpu_help(m);
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002915}
2916#else
2917/* Should never be called */
2918static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2919#endif
2920
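/*
 * seq_file show handler.  iter->leftover implements a retry
 * protocol: when trace_print_seq() cannot fit a line into the
 * seq_file buffer, the failure is remembered so the same data is
 * emitted again on the next call instead of being lost.
 */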
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002921static int s_show(struct seq_file *m, void *v)
2922{
2923 struct trace_iterator *iter = v;
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002924 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002925
2926 if (iter->ent == NULL) {
2927 if (iter->tr) {
2928 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2929 seq_puts(m, "#\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04002930 test_ftrace_alive(m);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002931 }
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002932 if (iter->snapshot && trace_empty(iter))
2933 print_snapshot_help(m, iter);
2934 else if (iter->trace && iter->trace->print_header)
Markus Metzger8bba1bf2008-11-25 09:12:31 +01002935 iter->trace->print_header(m);
Jiri Olsa62b915f2010-04-02 19:01:22 +02002936 else
2937 trace_default_header(m);
2938
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002939 } else if (iter->leftover) {
2940 /*
2941 * If we filled the seq_file buffer earlier, we
2942 * want to just show it now.
2943 */
2944 ret = trace_print_seq(m, &iter->seq);
2945
2946 /* ret should this time be zero, but you never know */
2947 iter->leftover = ret;
2948
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002949 } else {
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002950 print_trace_line(iter);
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002951 ret = trace_print_seq(m, &iter->seq);
2952 /*
2953 * If we overflow the seq_file buffer, then it will
2954 * ask us for this data again at start up.
2955 * Use that instead.
2956 * ret is 0 if seq_file write succeeded.
2957 * -1 otherwise.
2958 */
2959 iter->leftover = ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002960 }
2961
2962 return 0;
2963}
2964
Oleg Nesterov649e9c72013-07-23 17:25:54 +02002965/*
2966 * Should be used after trace_array_get(), trace_types_lock
2967 * ensures that i_cdev was already initialized.
2968 */
2969static inline int tracing_get_cpu(struct inode *inode)
2970{
2971 if (inode->i_cdev) /* See trace_create_cpu_file() */
2972 return (long)inode->i_cdev - 1;
2973 return RING_BUFFER_ALL_CPUS;
2974}
2975
James Morris88e9d342009-09-22 16:43:43 -07002976static const struct seq_operations tracer_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02002977 .start = s_start,
2978 .next = s_next,
2979 .stop = s_stop,
2980 .show = s_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002981};
2982
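/*
 * Build a trace_iterator for reading the trace: copy the current
 * tracer so a concurrent tracer switch cannot change it under us,
 * select the max/snapshot buffer when appropriate, stop tracing for
 * the duration unless this is the snapshot file, and prepare a ring
 * buffer iterator for each CPU being read.
 */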
Ingo Molnare309b412008-05-12 21:20:51 +02002983static struct trace_iterator *
Oleg Nesterov6484c712013-07-23 17:26:10 +02002984__tracing_open(struct inode *inode, struct file *file, bool snapshot)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002985{
Oleg Nesterov6484c712013-07-23 17:26:10 +02002986 struct trace_array *tr = inode->i_private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002987 struct trace_iterator *iter;
Jiri Olsa50e18b92012-04-25 10:23:39 +02002988 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002989
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002990 if (tracing_disabled)
2991 return ERR_PTR(-ENODEV);
Steven Rostedt60a11772008-05-12 21:20:44 +02002992
Jiri Olsa50e18b92012-04-25 10:23:39 +02002993 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002994 if (!iter)
2995 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002996
Steven Rostedt6d158a82012-06-27 20:46:14 -04002997 iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
2998 GFP_KERNEL);
Dan Carpenter93574fc2012-07-11 09:35:08 +03002999 if (!iter->buffer_iter)
3000 goto release;
3001
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003002 /*
3003 * We make a copy of the current tracer to avoid concurrent
3004 * changes on it while we are reading.
3005 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003006 mutex_lock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003007 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003008 if (!iter->trace)
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003009 goto fail;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003010
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003011 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003012
Li Zefan79f55992009-06-15 14:58:26 +08003013 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003014 goto fail;
3015
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003016 iter->tr = tr;
3017
3018#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003019 /* Currently only the top directory has a snapshot */
3020 if (tr->current_trace->print_max || snapshot)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003021 iter->trace_buffer = &tr->max_buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003022 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003023#endif
3024 iter->trace_buffer = &tr->trace_buffer;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003025 iter->snapshot = snapshot;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003026 iter->pos = -1;
Oleg Nesterov6484c712013-07-23 17:26:10 +02003027 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003028 mutex_init(&iter->mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003029
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003030 /* Notify the tracer early; before we stop tracing. */
3031 if (iter->trace && iter->trace->open)
Markus Metzgera93751c2008-12-11 13:53:26 +01003032 iter->trace->open(iter);
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003033
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003034 /* Annotate start of buffers if we had overruns */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003035 if (ring_buffer_overruns(iter->trace_buffer->buffer))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003036 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3037
David Sharp8be07092012-11-13 12:18:22 -08003038 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09003039 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08003040 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3041
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003042 /* stop the trace while dumping if we are not opening "snapshot" */
3043 if (!iter->snapshot)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003044 tracing_stop_tr(tr);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003045
Steven Rostedtae3b5092013-01-23 15:22:59 -05003046 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003047 for_each_tracing_cpu(cpu) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003048 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003049 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07003050 }
3051 ring_buffer_read_prepare_sync();
3052 for_each_tracing_cpu(cpu) {
3053 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003054 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003055 }
3056 } else {
3057 cpu = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003058 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003059 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07003060 ring_buffer_read_prepare_sync();
3061 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003062 tracing_iter_reset(iter, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003063 }
3064
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003065 mutex_unlock(&trace_types_lock);
3066
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003067 return iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003068
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003069 fail:
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003070 mutex_unlock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003071 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003072 kfree(iter->buffer_iter);
Dan Carpenter93574fc2012-07-11 09:35:08 +03003073release:
Jiri Olsa50e18b92012-04-25 10:23:39 +02003074 seq_release_private(inode, file);
3075 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003076}
3077
3078int tracing_open_generic(struct inode *inode, struct file *filp)
3079{
Steven Rostedt60a11772008-05-12 21:20:44 +02003080 if (tracing_disabled)
3081 return -ENODEV;
3082
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003083 filp->private_data = inode->i_private;
3084 return 0;
3085}
3086
Geyslan G. Bem2e864212013-10-18 21:15:54 -03003087bool tracing_is_disabled(void)
3088{
3089 return tracing_disabled;
3090}
3091
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003092/*
3093 * Open and update trace_array ref count.
3094 * Must have the current trace_array passed to it.
3095 */
Steven Rostedt (Red Hat)dcc30222013-07-02 20:30:52 -04003096static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003097{
3098 struct trace_array *tr = inode->i_private;
3099
3100 if (tracing_disabled)
3101 return -ENODEV;
3102
3103 if (trace_array_get(tr) < 0)
3104 return -ENODEV;
3105
3106 filp->private_data = inode->i_private;
3107
3108 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003109}
3110
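/*
 * Release side of tracing_open(): finish the per-CPU buffer
 * iterators, run the tracer's close() hook, restart tracing if
 * __tracing_open() stopped it, and drop the trace_array reference.
 */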
Hannes Eder4fd27352009-02-10 19:44:12 +01003111static int tracing_release(struct inode *inode, struct file *file)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003112{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003113 struct trace_array *tr = inode->i_private;
matt mooney907f2782010-09-27 19:04:53 -07003114 struct seq_file *m = file->private_data;
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003115 struct trace_iterator *iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003116 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003117
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003118 if (!(file->f_mode & FMODE_READ)) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003119 trace_array_put(tr);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003120 return 0;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003121 }
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003122
Oleg Nesterov6484c712013-07-23 17:26:10 +02003123 /* Writes do not use seq_file */
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003124 iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003125 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05003126
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003127 for_each_tracing_cpu(cpu) {
3128 if (iter->buffer_iter[cpu])
3129 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3130 }
3131
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003132 if (iter->trace && iter->trace->close)
3133 iter->trace->close(iter);
3134
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003135 if (!iter->snapshot)
3136 /* reenable tracing if it was previously enabled */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003137 tracing_start_tr(tr);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003138
3139 __trace_array_put(tr);
3140
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003141 mutex_unlock(&trace_types_lock);
3142
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003143 mutex_destroy(&iter->mutex);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003144 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003145 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003146 kfree(iter->buffer_iter);
Jiri Olsa50e18b92012-04-25 10:23:39 +02003147 seq_release_private(inode, file);
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003148
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003149 return 0;
3150}
3151
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003152static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3153{
3154 struct trace_array *tr = inode->i_private;
3155
3156 trace_array_put(tr);
3157 return 0;
3158}
3159
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003160static int tracing_single_release_tr(struct inode *inode, struct file *file)
3161{
3162 struct trace_array *tr = inode->i_private;
3163
3164 trace_array_put(tr);
3165
3166 return single_release(inode, file);
3167}
3168
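/*
 * Opening the trace file for writing with O_TRUNC erases the buffer
 * (one CPU or all of them, depending on the file opened); opening it
 * for reading builds the full seq_file iterator.
 */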
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003169static int tracing_open(struct inode *inode, struct file *file)
3170{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003171 struct trace_array *tr = inode->i_private;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003172 struct trace_iterator *iter;
3173 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003174
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003175 if (trace_array_get(tr) < 0)
3176 return -ENODEV;
3177
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003178 /* If this file was open for write, then erase contents */
Oleg Nesterov6484c712013-07-23 17:26:10 +02003179 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3180 int cpu = tracing_get_cpu(inode);
3181
3182 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003183 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003184 else
Oleg Nesterov6484c712013-07-23 17:26:10 +02003185 tracing_reset(&tr->trace_buffer, cpu);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003186 }
3187
3188 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003189 iter = __tracing_open(inode, file, false);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003190 if (IS_ERR(iter))
3191 ret = PTR_ERR(iter);
3192 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
3193 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3194 }
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003195
3196 if (ret < 0)
3197 trace_array_put(tr);
3198
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003199 return ret;
3200}
3201
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003202/*
3203 * Some tracers are not suitable for instance buffers.
3204 * A tracer is always available for the global array (toplevel)
3205 * or if it explicitly states that it is.
3206 */
3207static bool
3208trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3209{
3210 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3211}
3212
3213/* Find the next tracer that this trace array may use */
3214static struct tracer *
3215get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3216{
3217 while (t && !trace_ok_for_array(t, tr))
3218 t = t->next;
3219
3220 return t;
3221}
3222
Ingo Molnare309b412008-05-12 21:20:51 +02003223static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003224t_next(struct seq_file *m, void *v, loff_t *pos)
3225{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003226 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003227 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003228
3229 (*pos)++;
3230
3231 if (t)
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003232 t = get_tracer_for_array(tr, t->next);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003233
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003234 return t;
3235}
3236
3237static void *t_start(struct seq_file *m, loff_t *pos)
3238{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003239 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003240 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003241 loff_t l = 0;
3242
3243 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003244
3245 t = get_tracer_for_array(tr, trace_types);
3246 for (; t && l < *pos; t = t_next(m, t, &l))
3247 ;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003248
3249 return t;
3250}
3251
3252static void t_stop(struct seq_file *m, void *p)
3253{
3254 mutex_unlock(&trace_types_lock);
3255}
3256
3257static int t_show(struct seq_file *m, void *v)
3258{
3259 struct tracer *t = v;
3260
3261 if (!t)
3262 return 0;
3263
3264 seq_printf(m, "%s", t->name);
3265 if (t->next)
3266 seq_putc(m, ' ');
3267 else
3268 seq_putc(m, '\n');
3269
3270 return 0;
3271}
3272
James Morris88e9d342009-09-22 16:43:43 -07003273static const struct seq_operations show_traces_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003274 .start = t_start,
3275 .next = t_next,
3276 .stop = t_stop,
3277 .show = t_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003278};
3279
3280static int show_traces_open(struct inode *inode, struct file *file)
3281{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003282 struct trace_array *tr = inode->i_private;
3283 struct seq_file *m;
3284 int ret;
3285
Steven Rostedt60a11772008-05-12 21:20:44 +02003286 if (tracing_disabled)
3287 return -ENODEV;
3288
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003289 ret = seq_open(file, &show_traces_seq_ops);
3290 if (ret)
3291 return ret;
3292
3293 m = file->private_data;
3294 m->private = tr;
3295
3296 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003297}
3298
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003299static ssize_t
3300tracing_write_stub(struct file *filp, const char __user *ubuf,
3301 size_t count, loff_t *ppos)
3302{
3303 return count;
3304}
3305
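/*
 * Seeking only makes sense when the file was opened for reading and
 * is backed by a seq_file; writers simply have their position reset
 * to zero.
 */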
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003306loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08003307{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003308 int ret;
3309
Slava Pestov364829b2010-11-24 15:13:16 -08003310 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003311 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08003312 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003313 file->f_pos = ret = 0;
3314
3315 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08003316}
3317
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003318static const struct file_operations tracing_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003319 .open = tracing_open,
3320 .read = seq_read,
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003321 .write = tracing_write_stub,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003322 .llseek = tracing_lseek,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003323 .release = tracing_release,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003324};
3325
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003326static const struct file_operations show_traces_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003327 .open = show_traces_open,
3328 .read = seq_read,
3329 .release = seq_release,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003330 .llseek = seq_lseek,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003331};
3332
Ingo Molnar36dfe922008-05-12 21:20:52 +02003333/*
Ingo Molnar36dfe922008-05-12 21:20:52 +02003334 * The tracer itself will not take this lock, but still we want
3335 * to provide a consistent cpumask to user-space:
3336 */
3337static DEFINE_MUTEX(tracing_cpumask_update_lock);
3338
3339/*
3340 * Temporary storage for the character representation of the
3341 * CPU bitmask (and one more byte for the newline):
3342 */
3343static char mask_str[NR_CPUS + 1];
3344
Ingo Molnarc7078de2008-05-12 21:20:52 +02003345static ssize_t
3346tracing_cpumask_read(struct file *filp, char __user *ubuf,
3347 size_t count, loff_t *ppos)
3348{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003349 struct trace_array *tr = file_inode(filp)->i_private;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003350 int len;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003351
3352 mutex_lock(&tracing_cpumask_update_lock);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003353
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003354 len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003355 if (count - len < 2) {
3356 count = -EINVAL;
3357 goto out_err;
3358 }
3359 len += sprintf(mask_str + len, "\n");
3360 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3361
3362out_err:
Ingo Molnarc7078de2008-05-12 21:20:52 +02003363 mutex_unlock(&tracing_cpumask_update_lock);
3364
3365 return count;
3366}
3367
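/*
 * Update which CPUs are traced.  For every CPU whose bit changes,
 * the disabled counter and the ring buffer record-enable state are
 * flipped together, under tr->max_lock with interrupts off so the
 * transition cannot race with a max-latency buffer swap.
 */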
3368static ssize_t
3369tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3370 size_t count, loff_t *ppos)
3371{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003372 struct trace_array *tr = file_inode(filp)->i_private;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303373 cpumask_var_t tracing_cpumask_new;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003374 int err, cpu;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303375
3376 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3377 return -ENOMEM;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003378
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303379 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003380 if (err)
3381 goto err_unlock;
3382
Li Zefan215368e2009-06-15 10:56:42 +08003383 mutex_lock(&tracing_cpumask_update_lock);
3384
Steven Rostedta5e25882008-12-02 15:34:05 -05003385 local_irq_disable();
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05003386 arch_spin_lock(&tr->max_lock);
Steven Rostedtab464282008-05-12 21:21:00 +02003387 for_each_tracing_cpu(cpu) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02003388 /*
3389 * Increase/decrease the disabled counter if we are
3390 * about to flip a bit in the cpumask:
3391 */
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003392 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303393 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003394 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3395 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003396 }
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003397 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303398 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003399 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3400 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003401 }
3402 }
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05003403 arch_spin_unlock(&tr->max_lock);
Steven Rostedta5e25882008-12-02 15:34:05 -05003404 local_irq_enable();
Ingo Molnar36dfe922008-05-12 21:20:52 +02003405
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003406 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003407
Ingo Molnarc7078de2008-05-12 21:20:52 +02003408 mutex_unlock(&tracing_cpumask_update_lock);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303409 free_cpumask_var(tracing_cpumask_new);
Ingo Molnarc7078de2008-05-12 21:20:52 +02003410
Ingo Molnarc7078de2008-05-12 21:20:52 +02003411 return count;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003412
3413err_unlock:
Li Zefan215368e2009-06-15 10:56:42 +08003414 free_cpumask_var(tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003415
3416 return err;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003417}
3418
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003419static const struct file_operations tracing_cpumask_fops = {
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003420 .open = tracing_open_generic_tr,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003421 .read = tracing_cpumask_read,
3422 .write = tracing_cpumask_write,
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003423 .release = tracing_release_generic_tr,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003424 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003425};
3426
Li Zefanfdb372e2009-12-08 11:15:59 +08003427static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003428{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003429 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003430 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003431 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003432 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003433
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003434 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003435 tracer_flags = tr->current_trace->flags->val;
3436 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003437
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003438 for (i = 0; trace_options[i]; i++) {
3439 if (trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08003440 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003441 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003442 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003443 }
3444
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003445 for (i = 0; trace_opts[i].name; i++) {
3446 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08003447 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003448 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003449 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003450 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003451 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003452
Li Zefanfdb372e2009-12-08 11:15:59 +08003453 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003454}
3455
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003456static int __set_tracer_option(struct trace_array *tr,
Li Zefan8d18eaa2009-12-08 11:17:06 +08003457 struct tracer_flags *tracer_flags,
3458 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003459{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003460 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003461 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003462
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003463 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003464 if (ret)
3465 return ret;
3466
3467 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08003468 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003469 else
Zhaolei77708412009-08-07 18:53:21 +08003470 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003471 return 0;
3472}
3473
Li Zefan8d18eaa2009-12-08 11:17:06 +08003474/* Try to assign a tracer specific option */
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003475static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
Li Zefan8d18eaa2009-12-08 11:17:06 +08003476{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003477 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003478 struct tracer_flags *tracer_flags = trace->flags;
3479 struct tracer_opt *opts = NULL;
3480 int i;
3481
3482 for (i = 0; tracer_flags->opts[i].name; i++) {
3483 opts = &tracer_flags->opts[i];
3484
3485 if (strcmp(cmp, opts->name) == 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003486 return __set_tracer_option(tr, trace->flags, opts, neg);
Li Zefan8d18eaa2009-12-08 11:17:06 +08003487 }
3488
3489 return -EINVAL;
3490}
3491
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003492/* Some tracers require overwrite to stay enabled */
3493int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3494{
3495 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3496 return -1;
3497
3498 return 0;
3499}
3500
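/*
 * Flip a single TRACE_ITER_* bit.  The current tracer may veto the
 * change through its flag_changed() hook; on success the side
 * effects (cmdline recording, ring buffer overwrite mode,
 * trace_printk start/stop) are kept in sync with the new value.
 */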
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003501int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003502{
3503 /* do nothing if flag is already set */
3504 if (!!(trace_flags & mask) == !!enabled)
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003505 return 0;
3506
3507 /* Give the tracer a chance to approve the change */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003508 if (tr->current_trace->flag_changed)
Steven Rostedt (Red Hat)bf6065b2014-01-10 17:51:01 -05003509 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003510 return -EINVAL;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003511
3512 if (enabled)
3513 trace_flags |= mask;
3514 else
3515 trace_flags &= ~mask;
Li Zefane870e9a2010-07-02 11:07:32 +08003516
3517 if (mask == TRACE_ITER_RECORD_CMD)
3518 trace_event_enable_cmd_record(enabled);
David Sharp750912f2010-12-08 13:46:47 -08003519
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003520 if (mask == TRACE_ITER_OVERWRITE) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003521 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003522#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003523 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003524#endif
3525 }
Steven Rostedt81698832012-10-11 10:15:05 -04003526
3527 if (mask == TRACE_ITER_PRINTK)
3528 trace_printk_start_stop_comm(enabled);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003529
3530 return 0;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003531}
3532
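/*
 * Parse one option token written to trace_options.  A leading "no"
 * clears the flag instead of setting it; a name that matches no core
 * trace_options[] entry falls through to the current tracer's
 * private options.  For example (assuming the core table names the
 * TRACE_ITER_OVERWRITE flag "overwrite"), writing "nooverwrite"
 * would clear that flag.
 */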
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003533static int trace_set_options(struct trace_array *tr, char *option)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003534{
Li Zefan8d18eaa2009-12-08 11:17:06 +08003535 char *cmp;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003536 int neg = 0;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003537 int ret = -ENODEV;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003538 int i;
3539
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003540 cmp = strstrip(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003541
Li Zefan8d18eaa2009-12-08 11:17:06 +08003542 if (strncmp(cmp, "no", 2) == 0) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003543 neg = 1;
3544 cmp += 2;
3545 }
3546
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003547 mutex_lock(&trace_types_lock);
3548
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003549 for (i = 0; trace_options[i]; i++) {
Li Zefan8d18eaa2009-12-08 11:17:06 +08003550 if (strcmp(cmp, trace_options[i]) == 0) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003551 ret = set_tracer_flag(tr, 1 << i, !neg);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003552 break;
3553 }
3554 }
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003555
3556 /* If no option could be set, test the specific tracer options */
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003557 if (!trace_options[i])
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003558 ret = set_tracer_option(tr, cmp, neg);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003559
3560 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003561
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003562 return ret;
3563}
3564
3565static ssize_t
3566tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3567 size_t cnt, loff_t *ppos)
3568{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003569 struct seq_file *m = filp->private_data;
3570 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003571 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003572 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003573
3574 if (cnt >= sizeof(buf))
3575 return -EINVAL;
3576
3577 if (copy_from_user(&buf, ubuf, cnt))
3578 return -EFAULT;
3579
Steven Rostedta8dd2172013-01-09 20:54:17 -05003580 buf[cnt] = 0;
3581
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003582 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003583 if (ret < 0)
3584 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003585
Jiri Olsacf8517c2009-10-23 19:36:16 -04003586 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003587
3588 return cnt;
3589}
3590
Li Zefanfdb372e2009-12-08 11:15:59 +08003591static int tracing_trace_options_open(struct inode *inode, struct file *file)
3592{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003593 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003594 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003595
Li Zefanfdb372e2009-12-08 11:15:59 +08003596 if (tracing_disabled)
3597 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003598
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003599 if (trace_array_get(tr) < 0)
3600 return -ENODEV;
3601
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003602 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3603 if (ret < 0)
3604 trace_array_put(tr);
3605
3606 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08003607}
3608
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003609static const struct file_operations tracing_iter_fops = {
Li Zefanfdb372e2009-12-08 11:15:59 +08003610 .open = tracing_trace_options_open,
3611 .read = seq_read,
3612 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003613 .release = tracing_single_release_tr,
Steven Rostedtee6bce52008-11-12 17:52:37 -05003614 .write = tracing_trace_options_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003615};
3616
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003617static const char readme_msg[] =
3618 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003619 "# echo 0 > tracing_on : quick way to disable tracing\n"
3620 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3621 " Important files:\n"
3622 " trace\t\t\t- The static contents of the buffer\n"
3623 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3624 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3625 " current_tracer\t- function and latency tracers\n"
3626 " available_tracers\t- list of configured tracers for current_tracer\n"
3627 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3628 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3629 " trace_clock\t\t-change the clock used to order events\n"
3630 " local: Per cpu clock but may not be synced across CPUs\n"
3631 " global: Synced across CPUs but slows tracing down.\n"
3632 " counter: Not a clock, but just an increment\n"
3633 " uptime: Jiffy counter from time of boot\n"
3634 " perf: Same clock that perf events use\n"
3635#ifdef CONFIG_X86_64
3636 " x86-tsc: TSC cycle counter\n"
3637#endif
3638 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3639 " tracing_cpumask\t- Limit which CPUs to trace\n"
3640 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3641 "\t\t\t Remove sub-buffer with rmdir\n"
3642 " trace_options\t\t- Set format or modify how tracing happens\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003643 "\t\t\t Disable an option by prefixing 'no' to the\n"
3644 "\t\t\t option name\n"
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003645 " saved_cmdlines_size\t- echo the number of comm-pid entries to store in here\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003646#ifdef CONFIG_DYNAMIC_FTRACE
3647 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003648 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3649 "\t\t\t functions\n"
3650 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3651 "\t modules: Can select a group via module\n"
3652 "\t Format: :mod:<module-name>\n"
3653 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3654 "\t triggers: a command to perform when function is hit\n"
3655 "\t Format: <function>:<trigger>[:count]\n"
3656 "\t trigger: traceon, traceoff\n"
3657 "\t\t enable_event:<system>:<event>\n"
3658 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003659#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003660 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003661#endif
3662#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003663 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003664#endif
Steven Rostedt (Red Hat)17a280e2014-04-10 22:43:37 -04003665 "\t\t dump\n"
3666 "\t\t cpudump\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003667 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3668 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3669 "\t The first one will disable tracing every time do_fault is hit\n"
3670 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3671 "\t The first time do trap is hit and it disables tracing, the\n"
3672 "\t counter will decrement to 2. If tracing is already disabled,\n"
3673 "\t the counter will not decrement. It only decrements when the\n"
3674 "\t trigger did work\n"
3675 "\t To remove trigger without count:\n"
3676 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3677 "\t To remove trigger with a count:\n"
3678 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003679 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003680 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3681 "\t modules: Can select a group via module command :mod:\n"
3682 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003683#endif /* CONFIG_DYNAMIC_FTRACE */
3684#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003685 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3686 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003687#endif
3688#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3689 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3690 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3691#endif
3692#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003693 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3694 "\t\t\t snapshot buffer. Read the contents for more\n"
3695 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003696#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003697#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003698 " stack_trace\t\t- Shows the max stack trace when active\n"
3699 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003700 "\t\t\t Write into this file to reset the max size (trigger a\n"
3701 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003702#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003703 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3704 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003705#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003706#endif /* CONFIG_STACK_TRACER */
Tom Zanussi26f25562014-01-17 15:11:44 -06003707 " events/\t\t- Directory containing all trace event subsystems:\n"
3708 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3709 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003710 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3711 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003712 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003713 " events/<system>/<event>/\t- Directory containing control files for\n"
3714 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003715 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3716 " filter\t\t- If set, only events passing filter are traced\n"
3717 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003718 "\t Format: <trigger>[:count][if <filter>]\n"
3719 "\t trigger: traceon, traceoff\n"
3720 "\t enable_event:<system>:<event>\n"
3721 "\t disable_event:<system>:<event>\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003722#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003723 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003724#endif
3725#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003726 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003727#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003728 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3729 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3730 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3731 "\t events/block/block_unplug/trigger\n"
3732 "\t The first disables tracing every time block_unplug is hit.\n"
3733 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3734 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3735 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3736 "\t Like function triggers, the counter is only decremented if it\n"
3737 "\t enabled or disabled tracing.\n"
3738 "\t To remove a trigger without a count:\n"
3739 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3740 "\t To remove a trigger with a count:\n"
3741 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3742 "\t Filters can be ignored when removing a trigger.\n"
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003743;

static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
                    size_t cnt, loff_t *ppos)
{
        return simple_read_from_buffer(ubuf, cnt, ppos,
                                       readme_msg, strlen(readme_msg));
}

static const struct file_operations tracing_readme_fops = {
        .open           = tracing_open_generic,
        .read           = tracing_readme_read,
        .llseek         = generic_file_llseek,
};

static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
{
        unsigned int *ptr = v;

        if (*pos || m->count)
                ptr++;

        (*pos)++;

        for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
             ptr++) {
                if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
                        continue;

                return ptr;
        }

        return NULL;
}

static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
{
        void *v;
        loff_t l = 0;

        preempt_disable();
        arch_spin_lock(&trace_cmdline_lock);

        v = &savedcmd->map_cmdline_to_pid[0];
        while (l <= *pos) {
                v = saved_cmdlines_next(m, v, &l);
                if (!v)
                        return NULL;
        }

        return v;
}

static void saved_cmdlines_stop(struct seq_file *m, void *v)
{
        arch_spin_unlock(&trace_cmdline_lock);
        preempt_enable();
}

static int saved_cmdlines_show(struct seq_file *m, void *v)
{
        char buf[TASK_COMM_LEN];
        unsigned int *pid = v;

        __trace_find_cmdline(*pid, buf);
        seq_printf(m, "%d %s\n", *pid, buf);
        return 0;
}

static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
        .start          = saved_cmdlines_start,
        .next           = saved_cmdlines_next,
        .stop           = saved_cmdlines_stop,
        .show           = saved_cmdlines_show,
};

static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
{
        if (tracing_disabled)
                return -ENODEV;

        return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
}

static const struct file_operations tracing_saved_cmdlines_fops = {
        .open           = tracing_saved_cmdlines_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

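/*
 * Usage sketch: the seq_file operations above back the saved_cmdlines
 * file, so the recorded pid/comm pairs can be listed from a shell.
 * The mount point is an assumption (debugfs may live elsewhere) and
 * the output lines below are illustrative only:
 *
 *      # cat /sys/kernel/debug/tracing/saved_cmdlines
 *      29 ksoftirqd/3
 *      1024 sshd
 */
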
static ssize_t
tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
                                 size_t cnt, loff_t *ppos)
{
        char buf[64];
        int r;

        arch_spin_lock(&trace_cmdline_lock);
        r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
        arch_spin_unlock(&trace_cmdline_lock);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
{
        kfree(s->saved_cmdlines);
        kfree(s->map_cmdline_to_pid);
        kfree(s);
}

static int tracing_resize_saved_cmdlines(unsigned int val)
{
        struct saved_cmdlines_buffer *s, *savedcmd_temp;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        if (allocate_cmdlines_buffer(val, s) < 0) {
                kfree(s);
                return -ENOMEM;
        }

        arch_spin_lock(&trace_cmdline_lock);
        savedcmd_temp = savedcmd;
        savedcmd = s;
        arch_spin_unlock(&trace_cmdline_lock);
        free_saved_cmdlines_buffer(savedcmd_temp);

        return 0;
}

static ssize_t
tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
                                  size_t cnt, loff_t *ppos)
{
        unsigned long val;
        int ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
        if (!val || val > PID_MAX_DEFAULT)
                return -EINVAL;

        ret = tracing_resize_saved_cmdlines((unsigned int)val);
        if (ret < 0)
                return ret;

        *ppos += cnt;

        return cnt;
}

static const struct file_operations tracing_saved_cmdlines_size_fops = {
        .open           = tracing_open_generic,
        .read           = tracing_saved_cmdlines_size_read,
        .write          = tracing_saved_cmdlines_size_write,
};

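/*
 * Usage sketch: writing a count to saved_cmdlines_size goes through
 * tracing_saved_cmdlines_size_write() and reallocates the cmdline map
 * via tracing_resize_saved_cmdlines(). The mount point is an
 * assumption:
 *
 *      # echo 1024 > /sys/kernel/debug/tracing/saved_cmdlines_size
 *      # cat /sys/kernel/debug/tracing/saved_cmdlines_size
 *      1024
 */
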
static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
                       size_t cnt, loff_t *ppos)
{
        struct trace_array *tr = filp->private_data;
        char buf[MAX_TRACER_SIZE+2];
        int r;

        mutex_lock(&trace_types_lock);
        r = sprintf(buf, "%s\n", tr->current_trace->name);
        mutex_unlock(&trace_types_lock);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

int tracer_init(struct tracer *t, struct trace_array *tr)
{
        tracing_reset_online_cpus(&tr->trace_buffer);
        return t->init(tr);
}

static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
{
        int cpu;

        for_each_tracing_cpu(cpu)
                per_cpu_ptr(buf->data, cpu)->entries = val;
}

#ifdef CONFIG_TRACER_MAX_TRACE
/* resize @trace_buf's buffer to the size of @size_buf's entries */
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
                                        struct trace_buffer *size_buf, int cpu_id)
{
        int cpu, ret = 0;

        if (cpu_id == RING_BUFFER_ALL_CPUS) {
                for_each_tracing_cpu(cpu) {
                        ret = ring_buffer_resize(trace_buf->buffer,
                                 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
                        if (ret < 0)
                                break;
                        per_cpu_ptr(trace_buf->data, cpu)->entries =
                                per_cpu_ptr(size_buf->data, cpu)->entries;
                }
        } else {
                ret = ring_buffer_resize(trace_buf->buffer,
                                 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
                if (ret == 0)
                        per_cpu_ptr(trace_buf->data, cpu_id)->entries =
                                per_cpu_ptr(size_buf->data, cpu_id)->entries;
        }

        return ret;
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int __tracing_resize_ring_buffer(struct trace_array *tr,
                                        unsigned long size, int cpu)
{
        int ret;

        /*
         * If kernel or user changes the size of the ring buffer
         * we use the size that was given, and we can forget about
         * expanding it later.
         */
        ring_buffer_expanded = true;

        /* May be called before buffers are initialized */
        if (!tr->trace_buffer.buffer)
                return 0;

        ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
        if (ret < 0)
                return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
        if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
            !tr->current_trace->use_max_tr)
                goto out;

        ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
        if (ret < 0) {
                int r = resize_buffer_duplicate_size(&tr->trace_buffer,
                                                     &tr->trace_buffer, cpu);
                if (r < 0) {
                        /*
                         * AARGH! We are left with different
                         * size max buffer!!!!
                         * The max buffer is our "snapshot" buffer.
                         * When a tracer needs a snapshot (one of the
                         * latency tracers), it swaps the max buffer
                         * with the saved snapshot. We succeeded in
                         * updating the size of the main buffer, but
                         * failed to update the size of the max buffer.
                         * And when we tried to reset the main buffer to
                         * the original size, we failed there too. This
                         * is very unlikely to happen, but if it does,
                         * warn and kill all tracing.
                         */
                        WARN_ON(1);
                        tracing_disabled = 1;
                }
                return ret;
        }

        if (cpu == RING_BUFFER_ALL_CPUS)
                set_buffer_entries(&tr->max_buffer, size);
        else
                per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;

 out:
#endif /* CONFIG_TRACER_MAX_TRACE */

        if (cpu == RING_BUFFER_ALL_CPUS)
                set_buffer_entries(&tr->trace_buffer, size);
        else
                per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;

        return ret;
}

static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
                                          unsigned long size, int cpu_id)
{
        int ret = size;

        mutex_lock(&trace_types_lock);

        if (cpu_id != RING_BUFFER_ALL_CPUS) {
                /* make sure this cpu is enabled in the mask */
                if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
                        ret = -EINVAL;
                        goto out;
                }
        }

        ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
        if (ret < 0)
                ret = -ENOMEM;

out:
        mutex_unlock(&trace_types_lock);

        return ret;
}

/**
 * tracing_update_buffers - used by tracing facility to expand ring buffers
 *
 * To save memory when tracing is never used on a system with it
 * configured in, the ring buffers are set to a minimum size. But once
 * a user starts to use the tracing facility, then they need to grow
 * to their default size.
 *
 * This function is to be called when a tracer is about to be used.
 */
int tracing_update_buffers(void)
{
        int ret = 0;

        mutex_lock(&trace_types_lock);
        if (!ring_buffer_expanded)
                ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
                                                RING_BUFFER_ALL_CPUS);
        mutex_unlock(&trace_types_lock);

        return ret;
}

struct trace_option_dentry;

static struct trace_option_dentry *
create_trace_option_files(struct trace_array *tr, struct tracer *tracer);

static void
destroy_trace_option_files(struct trace_option_dentry *topts);

/*
 * Used to clear out the tracer before deletion of an instance.
 * Must have trace_types_lock held.
 */
static void tracing_set_nop(struct trace_array *tr)
{
        if (tr->current_trace == &nop_trace)
                return;

        tr->current_trace->enabled--;

        if (tr->current_trace->reset)
                tr->current_trace->reset(tr);

        tr->current_trace = &nop_trace;
}

static int tracing_set_tracer(struct trace_array *tr, const char *buf)
{
        static struct trace_option_dentry *topts;
        struct tracer *t;
#ifdef CONFIG_TRACER_MAX_TRACE
        bool had_max_tr;
#endif
        int ret = 0;

        mutex_lock(&trace_types_lock);

        if (!ring_buffer_expanded) {
                ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
                                                RING_BUFFER_ALL_CPUS);
                if (ret < 0)
                        goto out;
                ret = 0;
        }

        for (t = trace_types; t; t = t->next) {
                if (strcmp(t->name, buf) == 0)
                        break;
        }
        if (!t) {
                ret = -EINVAL;
                goto out;
        }
        if (t == tr->current_trace)
                goto out;

        /* Some tracers are only allowed for the top level buffer */
        if (!trace_ok_for_array(t, tr)) {
                ret = -EINVAL;
                goto out;
        }

        trace_branch_disable();

        tr->current_trace->enabled--;

        if (tr->current_trace->reset)
                tr->current_trace->reset(tr);

        /* Current trace needs to be nop_trace before synchronize_sched */
        tr->current_trace = &nop_trace;

#ifdef CONFIG_TRACER_MAX_TRACE
        had_max_tr = tr->allocated_snapshot;

        if (had_max_tr && !t->use_max_tr) {
                /*
                 * We need to make sure that the update_max_tr sees that
                 * current_trace changed to nop_trace to keep it from
                 * swapping the buffers after we resize it.
                 * update_max_tr is called with interrupts disabled,
                 * so a synchronize_sched() is sufficient.
                 */
                synchronize_sched();
                free_snapshot(tr);
        }
#endif
        /* Currently, only the top instance has options */
        if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
                destroy_trace_option_files(topts);
                topts = create_trace_option_files(tr, t);
        }

#ifdef CONFIG_TRACER_MAX_TRACE
        if (t->use_max_tr && !had_max_tr) {
                ret = alloc_snapshot(tr);
                if (ret < 0)
                        goto out;
        }
#endif

        if (t->init) {
                ret = tracer_init(t, tr);
                if (ret)
                        goto out;
        }

        tr->current_trace = t;
        tr->current_trace->enabled++;
        trace_branch_enable(tr);
 out:
        mutex_unlock(&trace_types_lock);

        return ret;
}

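/*
 * Usage sketch: tracing_set_tracer() is reached by writing a tracer
 * name to current_tracer; the name must appear in available_tracers.
 * The mount point is an assumption:
 *
 *      # cat /sys/kernel/debug/tracing/available_tracers
 *      # echo function > /sys/kernel/debug/tracing/current_tracer
 *      # echo nop > /sys/kernel/debug/tracing/current_tracer
 */
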
static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
                        size_t cnt, loff_t *ppos)
{
        struct trace_array *tr = filp->private_data;
        char buf[MAX_TRACER_SIZE+1];
        int i;
        size_t ret;
        int err;

        ret = cnt;

        if (cnt > MAX_TRACER_SIZE)
                cnt = MAX_TRACER_SIZE;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;

        /* strip trailing whitespace */
        for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
                buf[i] = 0;

        err = tracing_set_tracer(tr, buf);
        if (err)
                return err;

        *ppos += ret;

        return ret;
}

static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        unsigned long *ptr = filp->private_data;
        char buf[64];
        int r;

        r = snprintf(buf, sizeof(buf), "%ld\n",
                     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
        if (r > sizeof(buf))
                r = sizeof(buf);
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
                      size_t cnt, loff_t *ppos)
{
        unsigned long *ptr = filp->private_data;
        unsigned long val;
        int ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        *ptr = val * 1000;

        return cnt;
}

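/*
 * Usage sketch: the two handlers above back the tracing_max_latency
 * file; reads report microseconds, and a written value (also in
 * microseconds) is stored as nanoseconds (val * 1000). The mount
 * point is an assumption:
 *
 *      # cat /sys/kernel/debug/tracing/tracing_max_latency
 *      # echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 */
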
static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
        struct trace_array *tr = inode->i_private;
        struct trace_iterator *iter;
        int ret = 0;

        if (tracing_disabled)
                return -ENODEV;

        if (trace_array_get(tr) < 0)
                return -ENODEV;

        mutex_lock(&trace_types_lock);

        /* create a buffer to store the information to pass to userspace */
        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter) {
                ret = -ENOMEM;
                __trace_array_put(tr);
                goto out;
        }

        /*
         * We make a copy of the current tracer to avoid concurrent
         * changes on it while we are reading.
         */
        iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
        if (!iter->trace) {
                ret = -ENOMEM;
                goto fail;
        }
        *iter->trace = *tr->current_trace;

        if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
                ret = -ENOMEM;
                goto fail;
        }

        /* trace pipe does not show start of buffer */
        cpumask_setall(iter->started);

        if (trace_flags & TRACE_ITER_LATENCY_FMT)
                iter->iter_flags |= TRACE_FILE_LAT_FMT;

        /* Output in nanoseconds only if we are using a clock in nanoseconds. */
        if (trace_clocks[tr->clock_id].in_ns)
                iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

        iter->tr = tr;
        iter->trace_buffer = &tr->trace_buffer;
        iter->cpu_file = tracing_get_cpu(inode);
        mutex_init(&iter->mutex);
        filp->private_data = iter;

        if (iter->trace->pipe_open)
                iter->trace->pipe_open(iter);

        nonseekable_open(inode, filp);
out:
        mutex_unlock(&trace_types_lock);
        return ret;

fail:
        kfree(iter->trace);
        kfree(iter);
        __trace_array_put(tr);
        mutex_unlock(&trace_types_lock);
        return ret;
}

static int tracing_release_pipe(struct inode *inode, struct file *file)
{
        struct trace_iterator *iter = file->private_data;
        struct trace_array *tr = inode->i_private;

        mutex_lock(&trace_types_lock);

        if (iter->trace->pipe_close)
                iter->trace->pipe_close(iter);

        mutex_unlock(&trace_types_lock);

        free_cpumask_var(iter->started);
        mutex_destroy(&iter->mutex);
        kfree(iter->trace);
        kfree(iter);

        trace_array_put(tr);

        return 0;
}

static unsigned int
trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
{
        /* Iterators are static, they should be filled or empty */
        if (trace_buffer_iter(iter, iter->cpu_file))
                return POLLIN | POLLRDNORM;

        if (trace_flags & TRACE_ITER_BLOCK)
                /*
                 * Always select as readable when in blocking mode
                 */
                return POLLIN | POLLRDNORM;
        else
                return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
                                             filp, poll_table);
}

static unsigned int
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
        struct trace_iterator *iter = filp->private_data;

        return trace_poll(iter, filp, poll_table);
}

/* Must be called with trace_types_lock mutex held. */
static int tracing_wait_pipe(struct file *filp)
{
        struct trace_iterator *iter = filp->private_data;
        int ret;

        while (trace_empty(iter)) {

                if ((filp->f_flags & O_NONBLOCK)) {
                        return -EAGAIN;
                }

                /*
                 * We stop blocking only once we have read something and
                 * tracing has been disabled. We still block if tracing is
                 * disabled, but we have never read anything. This allows a
                 * user to cat this file, and then enable tracing. But after
                 * we have read something, we give an EOF when tracing is
                 * again disabled.
                 *
                 * iter->pos will be 0 if we haven't read anything.
                 */
                if (!tracing_is_on() && iter->pos)
                        break;

                mutex_unlock(&iter->mutex);

                ret = wait_on_pipe(iter);

                mutex_lock(&iter->mutex);

                if (ret)
                        return ret;

                if (signal_pending(current))
                        return -EINTR;
        }

        return 1;
}

/*
 * Consumer reader.
 */
static ssize_t
tracing_read_pipe(struct file *filp, char __user *ubuf,
                  size_t cnt, loff_t *ppos)
{
        struct trace_iterator *iter = filp->private_data;
        struct trace_array *tr = iter->tr;
        ssize_t sret;

        /* return any leftover data */
        sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
        if (sret != -EBUSY)
                return sret;

        trace_seq_init(&iter->seq);

        /* copy the tracer to avoid using a global lock all around */
        mutex_lock(&trace_types_lock);
        if (unlikely(iter->trace->name != tr->current_trace->name))
                *iter->trace = *tr->current_trace;
        mutex_unlock(&trace_types_lock);

        /*
         * Avoid more than one consumer on a single file descriptor
         * This is just a matter of traces coherency, the ring buffer itself
         * is protected.
         */
        mutex_lock(&iter->mutex);
        if (iter->trace->read) {
                sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
                if (sret)
                        goto out;
        }

waitagain:
        sret = tracing_wait_pipe(filp);
        if (sret <= 0)
                goto out;

        /* stop when tracing is finished */
        if (trace_empty(iter)) {
                sret = 0;
                goto out;
        }

        if (cnt >= PAGE_SIZE)
                cnt = PAGE_SIZE - 1;

        /* reset all but tr, trace, and overruns */
        memset(&iter->seq, 0,
               sizeof(struct trace_iterator) -
               offsetof(struct trace_iterator, seq));
        cpumask_clear(iter->started);
        iter->pos = -1;

        trace_event_read_lock();
        trace_access_lock(iter->cpu_file);
        while (trace_find_next_entry_inc(iter) != NULL) {
                enum print_line_t ret;
                int len = iter->seq.len;

                ret = print_trace_line(iter);
                if (ret == TRACE_TYPE_PARTIAL_LINE) {
                        /* don't print partial lines */
                        iter->seq.len = len;
                        break;
                }
                if (ret != TRACE_TYPE_NO_CONSUME)
                        trace_consume(iter);

                if (iter->seq.len >= cnt)
                        break;

                /*
                 * Setting the full flag means we reached the trace_seq buffer
                 * size and we should leave by partial output condition above.
                 * One of the trace_seq_* functions is not used properly.
                 */
                WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
                          iter->ent->type);
        }
        trace_access_unlock(iter->cpu_file);
        trace_event_read_unlock();

        /* Now copy what we have to the user */
        sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
        if (iter->seq.readpos >= iter->seq.len)
                trace_seq_init(&iter->seq);

        /*
         * If there was nothing to send to user, in spite of consuming trace
         * entries, go back to wait for more entries.
         */
        if (sret == -EBUSY)
                goto waitagain;

out:
        mutex_unlock(&iter->mutex);

        return sret;
}

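/*
 * Usage sketch: tracing_read_pipe() implements the consuming read,
 * which is why iter->mutex above keeps a second reader from stealing
 * events mid-line. The mount point is an assumption:
 *
 *      # cat /sys/kernel/debug/tracing/trace_pipe
 */
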
static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
                                     unsigned int idx)
{
        __free_page(spd->pages[idx]);
}

static const struct pipe_buf_operations tracing_pipe_buf_ops = {
        .can_merge              = 0,
        .confirm                = generic_pipe_buf_confirm,
        .release                = generic_pipe_buf_release,
        .steal                  = generic_pipe_buf_steal,
        .get                    = generic_pipe_buf_get,
};

static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
        size_t count;
        int ret;

        /* Seq buffer is page-sized, exactly what we need. */
        for (;;) {
                count = iter->seq.len;
                ret = print_trace_line(iter);
                count = iter->seq.len - count;
                if (rem < count) {
                        rem = 0;
                        iter->seq.len -= count;
                        break;
                }
                if (ret == TRACE_TYPE_PARTIAL_LINE) {
                        iter->seq.len -= count;
                        break;
                }

                if (ret != TRACE_TYPE_NO_CONSUME)
                        trace_consume(iter);
                rem -= count;
                if (!trace_find_next_entry_inc(iter)) {
                        rem = 0;
                        iter->ent = NULL;
                        break;
                }
        }

        return rem;
}

static ssize_t tracing_splice_read_pipe(struct file *filp,
                                        loff_t *ppos,
                                        struct pipe_inode_info *pipe,
                                        size_t len,
                                        unsigned int flags)
{
        struct page *pages_def[PIPE_DEF_BUFFERS];
        struct partial_page partial_def[PIPE_DEF_BUFFERS];
        struct trace_iterator *iter = filp->private_data;
        struct splice_pipe_desc spd = {
                .pages          = pages_def,
                .partial        = partial_def,
                .nr_pages       = 0, /* This gets updated below. */
                .nr_pages_max   = PIPE_DEF_BUFFERS,
                .flags          = flags,
                .ops            = &tracing_pipe_buf_ops,
                .spd_release    = tracing_spd_release_pipe,
        };
        struct trace_array *tr = iter->tr;
        ssize_t ret;
        size_t rem;
        unsigned int i;

        if (splice_grow_spd(pipe, &spd))
                return -ENOMEM;

        /* copy the tracer to avoid using a global lock all around */
        mutex_lock(&trace_types_lock);
        if (unlikely(iter->trace->name != tr->current_trace->name))
                *iter->trace = *tr->current_trace;
        mutex_unlock(&trace_types_lock);

        mutex_lock(&iter->mutex);

        if (iter->trace->splice_read) {
                ret = iter->trace->splice_read(iter, filp,
                                               ppos, pipe, len, flags);
                if (ret)
                        goto out_err;
        }

        ret = tracing_wait_pipe(filp);
        if (ret <= 0)
                goto out_err;

        if (!iter->ent && !trace_find_next_entry_inc(iter)) {
                ret = -EFAULT;
                goto out_err;
        }

        trace_event_read_lock();
        trace_access_lock(iter->cpu_file);

        /* Fill as many pages as possible. */
        for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
                spd.pages[i] = alloc_page(GFP_KERNEL);
                if (!spd.pages[i])
                        break;

                rem = tracing_fill_pipe_page(rem, iter);

                /* Copy the data into the page, so we can start over. */
                ret = trace_seq_to_buffer(&iter->seq,
                                          page_address(spd.pages[i]),
                                          iter->seq.len);
                if (ret < 0) {
                        __free_page(spd.pages[i]);
                        break;
                }
                spd.partial[i].offset = 0;
                spd.partial[i].len = iter->seq.len;

                trace_seq_init(&iter->seq);
        }

        trace_access_unlock(iter->cpu_file);
        trace_event_read_unlock();
        mutex_unlock(&iter->mutex);

        spd.nr_pages = i;

        ret = splice_to_pipe(pipe, &spd);
out:
        splice_shrink_spd(&spd);
        return ret;

out_err:
        mutex_unlock(&iter->mutex);
        goto out;
}

static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        struct inode *inode = file_inode(filp);
        struct trace_array *tr = inode->i_private;
        int cpu = tracing_get_cpu(inode);
        char buf[64];
        int r = 0;
        ssize_t ret;

        mutex_lock(&trace_types_lock);

        if (cpu == RING_BUFFER_ALL_CPUS) {
                int cpu, buf_size_same;
                unsigned long size;

                size = 0;
                buf_size_same = 1;
                /* check if all cpu sizes are same */
                for_each_tracing_cpu(cpu) {
                        /* fill in the size from first enabled cpu */
                        if (size == 0)
                                size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
                        if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
                                buf_size_same = 0;
                                break;
                        }
                }

                if (buf_size_same) {
                        if (!ring_buffer_expanded)
                                r = sprintf(buf, "%lu (expanded: %lu)\n",
                                            size >> 10,
                                            trace_buf_size >> 10);
                        else
                                r = sprintf(buf, "%lu\n", size >> 10);
                } else
                        r = sprintf(buf, "X\n");
        } else
                r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);

        mutex_unlock(&trace_types_lock);

        ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
        return ret;
}

static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
                      size_t cnt, loff_t *ppos)
{
        struct inode *inode = file_inode(filp);
        struct trace_array *tr = inode->i_private;
        unsigned long val;
        int ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        /* must have at least 1 entry */
        if (!val)
                return -EINVAL;

        /* value is in KB */
        val <<= 10;
        ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
        if (ret < 0)
                return ret;

        *ppos += cnt;

        return cnt;
}

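/*
 * Usage sketch: the entries handlers above back buffer_size_kb, both
 * the top-level file (all CPUs) and the per-cpu copies. The mount
 * point is an assumption:
 *
 *      # echo 4096 > /sys/kernel/debug/tracing/buffer_size_kb
 *      # echo 1024 > /sys/kernel/debug/tracing/per_cpu/cpu0/buffer_size_kb
 *      # cat /sys/kernel/debug/tracing/buffer_size_kb
 */
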
static ssize_t
tracing_total_entries_read(struct file *filp, char __user *ubuf,
                           size_t cnt, loff_t *ppos)
{
        struct trace_array *tr = filp->private_data;
        char buf[64];
        int r, cpu;
        unsigned long size = 0, expanded_size = 0;

        mutex_lock(&trace_types_lock);
        for_each_tracing_cpu(cpu) {
                size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
                if (!ring_buffer_expanded)
                        expanded_size += trace_buf_size >> 10;
        }
        if (ring_buffer_expanded)
                r = sprintf(buf, "%lu\n", size);
        else
                r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
        mutex_unlock(&trace_types_lock);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
                          size_t cnt, loff_t *ppos)
{
        /*
         * There is no need to read what the user has written, this function
         * is just to make sure that there is no error when "echo" is used
         */

        *ppos += cnt;

        return cnt;
}

static int
tracing_free_buffer_release(struct inode *inode, struct file *filp)
{
        struct trace_array *tr = inode->i_private;

        /* disable tracing ? */
        if (trace_flags & TRACE_ITER_STOP_ON_FREE)
                tracer_tracing_off(tr);
        /* resize the ring buffer to 0 */
        tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);

        trace_array_put(tr);

        return 0;
}

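/*
 * Usage sketch: the free_buffer file shrinks the ring buffer to zero
 * on release, so a bare redirection (open + close) is enough; with
 * the disable_on_free trace option set, tracing is turned off first.
 * The mount point and option name are assumptions:
 *
 *      # echo > /sys/kernel/debug/tracing/free_buffer
 */
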
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004790static ssize_t
4791tracing_mark_write(struct file *filp, const char __user *ubuf,
4792 size_t cnt, loff_t *fpos)
4793{
Steven Rostedtd696b582011-09-22 11:50:27 -04004794 unsigned long addr = (unsigned long)ubuf;
Alexander Z Lam2d716192013-07-01 15:31:24 -07004795 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04004796 struct ring_buffer_event *event;
4797 struct ring_buffer *buffer;
4798 struct print_entry *entry;
4799 unsigned long irq_flags;
4800 struct page *pages[2];
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004801 void *map_page[2];
Steven Rostedtd696b582011-09-22 11:50:27 -04004802 int nr_pages = 1;
4803 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04004804 int offset;
4805 int size;
4806 int len;
4807 int ret;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004808 int i;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004809
Steven Rostedtc76f0692008-11-07 22:36:02 -05004810 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004811 return -EINVAL;
4812
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07004813 if (!(trace_flags & TRACE_ITER_MARKERS))
4814 return -EINVAL;
4815
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004816 if (cnt > TRACE_BUF_SIZE)
4817 cnt = TRACE_BUF_SIZE;
4818
Steven Rostedtd696b582011-09-22 11:50:27 -04004819 /*
4820 * Userspace is injecting traces into the kernel trace buffer.
4821	 * We want to be as non-intrusive as possible.
4822 * To do so, we do not want to allocate any special buffers
4823 * or take any locks, but instead write the userspace data
4824 * straight into the ring buffer.
4825 *
4826 * First we need to pin the userspace buffer into memory,
4827	 * which it most likely already is, because it was just referenced.
4828 * But there's no guarantee that it is. By using get_user_pages_fast()
4829 * and kmap_atomic/kunmap_atomic() we can get access to the
4830 * pages directly. We then write the data directly into the
4831 * ring buffer.
4832 */
4833 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004834
Steven Rostedtd696b582011-09-22 11:50:27 -04004835	/* check if the write crosses a page boundary */
4836 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
4837 nr_pages = 2;
4838
4839 offset = addr & (PAGE_SIZE - 1);
4840 addr &= PAGE_MASK;
4841
4842 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
4843 if (ret < nr_pages) {
4844 while (--ret >= 0)
4845 put_page(pages[ret]);
4846 written = -EFAULT;
4847 goto out;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004848 }
4849
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004850 for (i = 0; i < nr_pages; i++)
4851 map_page[i] = kmap_atomic(pages[i]);
Steven Rostedtd696b582011-09-22 11:50:27 -04004852
4853 local_save_flags(irq_flags);
4854 size = sizeof(*entry) + cnt + 2; /* possible \n added */
Alexander Z Lam2d716192013-07-01 15:31:24 -07004855 buffer = tr->trace_buffer.buffer;
Steven Rostedtd696b582011-09-22 11:50:27 -04004856 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
4857 irq_flags, preempt_count());
4858 if (!event) {
4859 /* Ring buffer disabled, return as if not open for write */
4860 written = -EBADF;
4861 goto out_unlock;
4862 }
4863
4864 entry = ring_buffer_event_data(event);
4865 entry->ip = _THIS_IP_;
4866
4867 if (nr_pages == 2) {
4868 len = PAGE_SIZE - offset;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004869 memcpy(&entry->buf, map_page[0] + offset, len);
4870 memcpy(&entry->buf[len], map_page[1], cnt - len);
Steven Rostedtd696b582011-09-22 11:50:27 -04004871 } else
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004872 memcpy(&entry->buf, map_page[0] + offset, cnt);
Steven Rostedtd696b582011-09-22 11:50:27 -04004873
4874 if (entry->buf[cnt - 1] != '\n') {
4875 entry->buf[cnt] = '\n';
4876 entry->buf[cnt + 1] = '\0';
4877 } else
4878 entry->buf[cnt] = '\0';
4879
Steven Rostedt7ffbd482012-10-11 12:14:25 -04004880 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04004881
4882 written = cnt;
4883
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02004884 *fpos += written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004885
Steven Rostedtd696b582011-09-22 11:50:27 -04004886 out_unlock:
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004887	for (i = 0; i < nr_pages; i++) {
4888 kunmap_atomic(map_page[i]);
4889 put_page(pages[i]);
4890 }
Steven Rostedtd696b582011-09-22 11:50:27 -04004891 out:
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02004892 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004893}
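/*
 * Usage sketch (userspace, not part of this file): a marker can be
 * injected into the trace with a plain write to trace_marker, e.g.:
 *
 *	int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);
 *	write(fd, "hello\n", 6);
 *
 * The data shows up in the trace as a TRACE_PRINT entry.
 */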
4894
Li Zefan13f16d22009-12-08 11:16:11 +08004895static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08004896{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004897 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08004898 int i;
4899
4900 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08004901 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08004902 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004903 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
4904 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08004905 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08004906
Li Zefan13f16d22009-12-08 11:16:11 +08004907 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08004908}
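/*
 * Reading "trace_clock" lists the available clocks with the current
 * one in brackets, e.g. (the exact list depends on arch/config):
 *
 *	[local] global counter uptime perf
 */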
4909
Steven Rostedte1e232c2014-02-10 23:38:46 -05004910static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
Zhaolei5079f322009-08-25 16:12:56 +08004911{
Zhaolei5079f322009-08-25 16:12:56 +08004912 int i;
4913
Zhaolei5079f322009-08-25 16:12:56 +08004914 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
4915 if (strcmp(trace_clocks[i].name, clockstr) == 0)
4916 break;
4917 }
4918 if (i == ARRAY_SIZE(trace_clocks))
4919 return -EINVAL;
4920
Zhaolei5079f322009-08-25 16:12:56 +08004921 mutex_lock(&trace_types_lock);
4922
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004923 tr->clock_id = i;
4924
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004925 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08004926
David Sharp60303ed2012-10-11 16:27:52 -07004927 /*
4928 * New clock may not be consistent with the previous clock.
4929 * Reset the buffer so that it doesn't have incomparable timestamps.
4930 */
Alexander Z Lam94571582013-08-02 18:36:16 -07004931 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004932
4933#ifdef CONFIG_TRACER_MAX_TRACE
4934 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
4935 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
Alexander Z Lam94571582013-08-02 18:36:16 -07004936 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004937#endif
David Sharp60303ed2012-10-11 16:27:52 -07004938
Zhaolei5079f322009-08-25 16:12:56 +08004939 mutex_unlock(&trace_types_lock);
4940
Steven Rostedte1e232c2014-02-10 23:38:46 -05004941 return 0;
4942}
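/*
 * Usage sketch: switching clocks resets the buffers, since timestamps
 * taken with different clocks are not comparable:
 *
 *	echo global > trace_clock
 */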
4943
4944static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
4945 size_t cnt, loff_t *fpos)
4946{
4947 struct seq_file *m = filp->private_data;
4948 struct trace_array *tr = m->private;
4949 char buf[64];
4950 const char *clockstr;
4951 int ret;
4952
4953 if (cnt >= sizeof(buf))
4954 return -EINVAL;
4955
4956 if (copy_from_user(&buf, ubuf, cnt))
4957 return -EFAULT;
4958
4959 buf[cnt] = 0;
4960
4961 clockstr = strstrip(buf);
4962
4963 ret = tracing_set_clock(tr, clockstr);
4964 if (ret)
4965 return ret;
4966
Zhaolei5079f322009-08-25 16:12:56 +08004967 *fpos += cnt;
4968
4969 return cnt;
4970}
4971
Li Zefan13f16d22009-12-08 11:16:11 +08004972static int tracing_clock_open(struct inode *inode, struct file *file)
4973{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004974 struct trace_array *tr = inode->i_private;
4975 int ret;
4976
Li Zefan13f16d22009-12-08 11:16:11 +08004977 if (tracing_disabled)
4978 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004979
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004980 if (trace_array_get(tr))
4981 return -ENODEV;
4982
4983 ret = single_open(file, tracing_clock_show, inode->i_private);
4984 if (ret < 0)
4985 trace_array_put(tr);
4986
4987 return ret;
Li Zefan13f16d22009-12-08 11:16:11 +08004988}
4989
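/*
 * Per-open state for the raw buffer files: "spare" caches one ring
 * buffer page between reads, and "read" tracks how much of that page
 * has already been copied to userspace.
 */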
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05004990struct ftrace_buffer_info {
4991 struct trace_iterator iter;
4992 void *spare;
4993 unsigned int read;
4994};
4995
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004996#ifdef CONFIG_TRACER_SNAPSHOT
4997static int tracing_snapshot_open(struct inode *inode, struct file *file)
4998{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004999 struct trace_array *tr = inode->i_private;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005000 struct trace_iterator *iter;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005001 struct seq_file *m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005002 int ret = 0;
5003
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005004 if (trace_array_get(tr) < 0)
5005 return -ENODEV;
5006
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005007 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02005008 iter = __tracing_open(inode, file, true);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005009 if (IS_ERR(iter))
5010 ret = PTR_ERR(iter);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005011 } else {
5012 /* Writes still need the seq_file to hold the private data */
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005013 ret = -ENOMEM;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005014 m = kzalloc(sizeof(*m), GFP_KERNEL);
5015 if (!m)
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005016 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005017 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5018 if (!iter) {
5019 kfree(m);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005020 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005021 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005022 ret = 0;
5023
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005024 iter->tr = tr;
Oleg Nesterov6484c712013-07-23 17:26:10 +02005025 iter->trace_buffer = &tr->max_buffer;
5026 iter->cpu_file = tracing_get_cpu(inode);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005027 m->private = iter;
5028 file->private_data = m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005029 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005030out:
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005031 if (ret < 0)
5032 trace_array_put(tr);
5033
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005034 return ret;
5035}
5036
5037static ssize_t
5038tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5039 loff_t *ppos)
5040{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005041 struct seq_file *m = filp->private_data;
5042 struct trace_iterator *iter = m->private;
5043 struct trace_array *tr = iter->tr;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005044 unsigned long val;
5045 int ret;
5046
5047 ret = tracing_update_buffers();
5048 if (ret < 0)
5049 return ret;
5050
5051 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5052 if (ret)
5053 return ret;
5054
5055 mutex_lock(&trace_types_lock);
5056
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005057 if (tr->current_trace->use_max_tr) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005058 ret = -EBUSY;
5059 goto out;
5060 }
5061
5062 switch (val) {
5063 case 0:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005064 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5065 ret = -EINVAL;
5066 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005067 }
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005068 if (tr->allocated_snapshot)
5069 free_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005070 break;
5071 case 1:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005072/* Only allow per-cpu swap if the ring buffer supports it */
5073#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5074 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5075 ret = -EINVAL;
5076 break;
5077 }
5078#endif
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05005079 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005080 ret = alloc_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005081 if (ret < 0)
5082 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005083 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005084 local_irq_disable();
5085 /* Now, we're going to swap */
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005086 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05005087 update_max_tr(tr, current, smp_processor_id());
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005088 else
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05005089 update_max_tr_single(tr, current, iter->cpu_file);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005090 local_irq_enable();
5091 break;
5092 default:
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05005093 if (tr->allocated_snapshot) {
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005094 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5095 tracing_reset_online_cpus(&tr->max_buffer);
5096 else
5097 tracing_reset(&tr->max_buffer, iter->cpu_file);
5098 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005099 break;
5100 }
5101
5102 if (ret >= 0) {
5103 *ppos += cnt;
5104 ret = cnt;
5105 }
5106out:
5107 mutex_unlock(&trace_types_lock);
5108 return ret;
5109}
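/*
 * Usage sketch for the "snapshot" file (per the switch above):
 *
 *	echo 1 > snapshot	# allocate if needed and take a snapshot
 *	echo 0 > snapshot	# free the snapshot buffer
 *	echo 2 > snapshot	# clear the snapshot contents, keep the buffer
 */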
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005110
5111static int tracing_snapshot_release(struct inode *inode, struct file *file)
5112{
5113 struct seq_file *m = file->private_data;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005114 int ret;
5115
5116 ret = tracing_release(inode, file);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005117
5118 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005119 return ret;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005120
5121 /* If write only, the seq_file is just a stub */
5122 if (m)
5123 kfree(m->private);
5124 kfree(m);
5125
5126 return 0;
5127}
5128
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005129static int tracing_buffers_open(struct inode *inode, struct file *filp);
5130static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5131 size_t count, loff_t *ppos);
5132static int tracing_buffers_release(struct inode *inode, struct file *file);
5133static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5134 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5135
5136static int snapshot_raw_open(struct inode *inode, struct file *filp)
5137{
5138 struct ftrace_buffer_info *info;
5139 int ret;
5140
5141 ret = tracing_buffers_open(inode, filp);
5142 if (ret < 0)
5143 return ret;
5144
5145 info = filp->private_data;
5146
5147 if (info->iter.trace->use_max_tr) {
5148 tracing_buffers_release(inode, filp);
5149 return -EBUSY;
5150 }
5151
5152 info->iter.snapshot = true;
5153 info->iter.trace_buffer = &info->iter.tr->max_buffer;
5154
5155 return ret;
5156}
5157
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005158#endif /* CONFIG_TRACER_SNAPSHOT */
5159
5160
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005161static const struct file_operations tracing_max_lat_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005162 .open = tracing_open_generic,
5163 .read = tracing_max_lat_read,
5164 .write = tracing_max_lat_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005165 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005166};
5167
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005168static const struct file_operations set_tracer_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005169 .open = tracing_open_generic,
5170 .read = tracing_set_trace_read,
5171 .write = tracing_set_trace_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005172 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005173};
5174
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005175static const struct file_operations tracing_pipe_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005176 .open = tracing_open_pipe,
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005177 .poll = tracing_poll_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005178 .read = tracing_read_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005179 .splice_read = tracing_splice_read_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005180 .release = tracing_release_pipe,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005181 .llseek = no_llseek,
Steven Rostedtb3806b42008-05-12 21:20:46 +02005182};
5183
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005184static const struct file_operations tracing_entries_fops = {
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005185 .open = tracing_open_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02005186 .read = tracing_entries_read,
5187 .write = tracing_entries_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005188 .llseek = generic_file_llseek,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005189 .release = tracing_release_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02005190};
5191
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005192static const struct file_operations tracing_total_entries_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005193 .open = tracing_open_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005194 .read = tracing_total_entries_read,
5195 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005196 .release = tracing_release_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005197};
5198
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005199static const struct file_operations tracing_free_buffer_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005200 .open = tracing_open_generic_tr,
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005201 .write = tracing_free_buffer_write,
5202 .release = tracing_free_buffer_release,
5203};
5204
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005205static const struct file_operations tracing_mark_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005206 .open = tracing_open_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005207 .write = tracing_mark_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005208 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005209 .release = tracing_release_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005210};
5211
Zhaolei5079f322009-08-25 16:12:56 +08005212static const struct file_operations trace_clock_fops = {
Li Zefan13f16d22009-12-08 11:16:11 +08005213 .open = tracing_clock_open,
5214 .read = seq_read,
5215 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005216 .release = tracing_single_release_tr,
Zhaolei5079f322009-08-25 16:12:56 +08005217 .write = tracing_clock_write,
5218};
5219
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005220#ifdef CONFIG_TRACER_SNAPSHOT
5221static const struct file_operations snapshot_fops = {
5222 .open = tracing_snapshot_open,
5223 .read = seq_read,
5224 .write = tracing_snapshot_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05005225 .llseek = tracing_lseek,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005226 .release = tracing_snapshot_release,
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005227};
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005228
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005229static const struct file_operations snapshot_raw_fops = {
5230 .open = snapshot_raw_open,
5231 .read = tracing_buffers_read,
5232 .release = tracing_buffers_release,
5233 .splice_read = tracing_buffers_splice_read,
5234 .llseek = no_llseek,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005235};
5236
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005237#endif /* CONFIG_TRACER_SNAPSHOT */
5238
Steven Rostedt2cadf912008-12-01 22:20:19 -05005239static int tracing_buffers_open(struct inode *inode, struct file *filp)
5240{
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005241 struct trace_array *tr = inode->i_private;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005242 struct ftrace_buffer_info *info;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005243 int ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005244
5245 if (tracing_disabled)
5246 return -ENODEV;
5247
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005248 if (trace_array_get(tr) < 0)
5249 return -ENODEV;
5250
Steven Rostedt2cadf912008-12-01 22:20:19 -05005251 info = kzalloc(sizeof(*info), GFP_KERNEL);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005252 if (!info) {
5253 trace_array_put(tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005254 return -ENOMEM;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005255 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05005256
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005257 mutex_lock(&trace_types_lock);
5258
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005259 info->iter.tr = tr;
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005260 info->iter.cpu_file = tracing_get_cpu(inode);
Steven Rostedtb6273442013-02-28 13:44:11 -05005261 info->iter.trace = tr->current_trace;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005262 info->iter.trace_buffer = &tr->trace_buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005263 info->spare = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005264 /* Force reading ring buffer for first read */
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005265 info->read = (unsigned int)-1;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005266
5267 filp->private_data = info;
5268
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005269 mutex_unlock(&trace_types_lock);
5270
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005271 ret = nonseekable_open(inode, filp);
5272 if (ret < 0)
5273 trace_array_put(tr);
5274
5275 return ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005276}
5277
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005278static unsigned int
5279tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5280{
5281 struct ftrace_buffer_info *info = filp->private_data;
5282 struct trace_iterator *iter = &info->iter;
5283
5284 return trace_poll(iter, filp, poll_table);
5285}
5286
Steven Rostedt2cadf912008-12-01 22:20:19 -05005287static ssize_t
5288tracing_buffers_read(struct file *filp, char __user *ubuf,
5289 size_t count, loff_t *ppos)
5290{
5291 struct ftrace_buffer_info *info = filp->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005292 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005293 ssize_t ret;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005294 ssize_t size;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005295
Steven Rostedt2dc5d122009-03-04 19:10:05 -05005296 if (!count)
5297 return 0;
5298
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005299 mutex_lock(&trace_types_lock);
5300
5301#ifdef CONFIG_TRACER_MAX_TRACE
5302 if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5303 size = -EBUSY;
5304 goto out_unlock;
5305 }
5306#endif
5307
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005308 if (!info->spare)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005309 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5310 iter->cpu_file);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005311 size = -ENOMEM;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005312 if (!info->spare)
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005313 goto out_unlock;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005314
Steven Rostedt2cadf912008-12-01 22:20:19 -05005315 /* Do we have previous read data to read? */
5316 if (info->read < PAGE_SIZE)
5317 goto read;
5318
Steven Rostedtb6273442013-02-28 13:44:11 -05005319 again:
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005320 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005321 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005322 &info->spare,
5323 count,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005324 iter->cpu_file, 0);
5325 trace_access_unlock(iter->cpu_file);
Steven Rostedtb6273442013-02-28 13:44:11 -05005326
5327 if (ret < 0) {
5328 if (trace_empty(iter)) {
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005329			if (filp->f_flags & O_NONBLOCK) {
5330 size = -EAGAIN;
5331 goto out_unlock;
5332 }
5333 mutex_unlock(&trace_types_lock);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005334 ret = wait_on_pipe(iter);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005335 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005336 if (ret) {
5337 size = ret;
5338 goto out_unlock;
5339 }
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005340 if (signal_pending(current)) {
5341 size = -EINTR;
5342 goto out_unlock;
5343 }
Steven Rostedtb6273442013-02-28 13:44:11 -05005344 goto again;
5345 }
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005346 size = 0;
5347 goto out_unlock;
Steven Rostedtb6273442013-02-28 13:44:11 -05005348 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05005349
Steven Rostedt436fc282011-10-14 10:44:25 -04005350 info->read = 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05005351 read:
Steven Rostedt2cadf912008-12-01 22:20:19 -05005352 size = PAGE_SIZE - info->read;
5353 if (size > count)
5354 size = count;
5355
5356 ret = copy_to_user(ubuf, info->spare + info->read, size);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005357 if (ret == size) {
5358 size = -EFAULT;
5359 goto out_unlock;
5360 }
Steven Rostedt2dc5d122009-03-04 19:10:05 -05005361 size -= ret;
5362
Steven Rostedt2cadf912008-12-01 22:20:19 -05005363 *ppos += size;
5364 info->read += size;
5365
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005366 out_unlock:
5367 mutex_unlock(&trace_types_lock);
5368
Steven Rostedt2cadf912008-12-01 22:20:19 -05005369 return size;
5370}
5371
5372static int tracing_buffers_release(struct inode *inode, struct file *file)
5373{
5374 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005375 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005376
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005377 mutex_lock(&trace_types_lock);
5378
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005379 __trace_array_put(iter->tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005380
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005381 if (info->spare)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005382 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005383 kfree(info);
5384
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005385 mutex_unlock(&trace_types_lock);
5386
Steven Rostedt2cadf912008-12-01 22:20:19 -05005387 return 0;
5388}
5389
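/*
 * A buffer_ref pins one ring buffer page that has been handed out via
 * splice().  The page goes back to the ring buffer when the last
 * reference (pipe buffer or spd slot) drops it.
 */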
5390struct buffer_ref {
5391 struct ring_buffer *buffer;
5392 void *page;
5393 int ref;
5394};
5395
5396static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5397 struct pipe_buffer *buf)
5398{
5399 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5400
5401 if (--ref->ref)
5402 return;
5403
5404 ring_buffer_free_read_page(ref->buffer, ref->page);
5405 kfree(ref);
5406 buf->private = 0;
5407}
5408
Steven Rostedt2cadf912008-12-01 22:20:19 -05005409static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5410 struct pipe_buffer *buf)
5411{
5412 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5413
5414 ref->ref++;
5415}
5416
5417/* Pipe buffer operations for a ring buffer page. */
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08005418static const struct pipe_buf_operations buffer_pipe_buf_ops = {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005419 .can_merge = 0,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005420 .confirm = generic_pipe_buf_confirm,
5421 .release = buffer_pipe_buf_release,
Masami Hiramatsud55cb6c2012-08-09 21:31:10 +09005422 .steal = generic_pipe_buf_steal,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005423 .get = buffer_pipe_buf_get,
5424};
5425
5426/*
5427 * Callback from splice_to_pipe(); we may need to release some pages
5428 * at the end of the spd in case we errored out while filling the pipe.
5429 */
5430static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5431{
5432 struct buffer_ref *ref =
5433 (struct buffer_ref *)spd->partial[i].private;
5434
5435 if (--ref->ref)
5436 return;
5437
5438 ring_buffer_free_read_page(ref->buffer, ref->page);
5439 kfree(ref);
5440 spd->partial[i].private = 0;
5441}
5442
5443static ssize_t
5444tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5445 struct pipe_inode_info *pipe, size_t len,
5446 unsigned int flags)
5447{
5448 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005449 struct trace_iterator *iter = &info->iter;
Jens Axboe35f3d142010-05-20 10:43:18 +02005450 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5451 struct page *pages_def[PIPE_DEF_BUFFERS];
Steven Rostedt2cadf912008-12-01 22:20:19 -05005452 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02005453 .pages = pages_def,
5454 .partial = partial_def,
Eric Dumazet047fe362012-06-12 15:24:40 +02005455 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005456 .flags = flags,
5457 .ops = &buffer_pipe_buf_ops,
5458 .spd_release = buffer_spd_release,
5459 };
5460 struct buffer_ref *ref;
Steven Rostedt93459c62009-04-29 00:23:13 -04005461 int entries, size, i;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005462 ssize_t ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005463
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005464 mutex_lock(&trace_types_lock);
5465
5466#ifdef CONFIG_TRACER_MAX_TRACE
5467 if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5468 ret = -EBUSY;
5469 goto out;
5470 }
5471#endif
5472
5473 if (splice_grow_spd(pipe, &spd)) {
5474 ret = -ENOMEM;
5475 goto out;
5476 }
Jens Axboe35f3d142010-05-20 10:43:18 +02005477
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005478 if (*ppos & (PAGE_SIZE - 1)) {
Jens Axboe35f3d142010-05-20 10:43:18 +02005479 ret = -EINVAL;
5480 goto out;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005481 }
5482
5483 if (len & (PAGE_SIZE - 1)) {
Jens Axboe35f3d142010-05-20 10:43:18 +02005484 if (len < PAGE_SIZE) {
5485 ret = -EINVAL;
5486 goto out;
5487 }
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005488 len &= PAGE_MASK;
5489 }
5490
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005491 again:
5492 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005493 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt93459c62009-04-29 00:23:13 -04005494
Al Viroa786c062014-04-11 12:01:03 -04005495 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005496 struct page *page;
5497 int r;
5498
5499 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
5500 if (!ref)
5501 break;
5502
Steven Rostedt7267fa62009-04-29 00:16:21 -04005503 ref->ref = 1;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005504 ref->buffer = iter->trace_buffer->buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005505 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005506 if (!ref->page) {
5507 kfree(ref);
5508 break;
5509 }
5510
5511 r = ring_buffer_read_page(ref->buffer, &ref->page,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005512 len, iter->cpu_file, 1);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005513 if (r < 0) {
Vaibhav Nagarnaik7ea59062011-05-03 17:56:42 -07005514 ring_buffer_free_read_page(ref->buffer, ref->page);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005515 kfree(ref);
5516 break;
5517 }
5518
5519 /*
5520		 * zero out any leftover data; this is going to
5521 * user land.
5522 */
5523 size = ring_buffer_page_len(ref->page);
5524 if (size < PAGE_SIZE)
5525 memset(ref->page + size, 0, PAGE_SIZE - size);
5526
5527 page = virt_to_page(ref->page);
5528
5529 spd.pages[i] = page;
5530 spd.partial[i].len = PAGE_SIZE;
5531 spd.partial[i].offset = 0;
5532 spd.partial[i].private = (unsigned long)ref;
5533 spd.nr_pages++;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005534 *ppos += PAGE_SIZE;
Steven Rostedt93459c62009-04-29 00:23:13 -04005535
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005536 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005537 }
5538
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005539 trace_access_unlock(iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005540 spd.nr_pages = i;
5541
5542 /* did we read anything? */
5543 if (!spd.nr_pages) {
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005544 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005545 ret = -EAGAIN;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005546 goto out;
5547 }
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005548 mutex_unlock(&trace_types_lock);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005549 ret = wait_on_pipe(iter);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005550 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005551 if (ret)
5552 goto out;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005553 if (signal_pending(current)) {
5554 ret = -EINTR;
5555 goto out;
5556 }
5557 goto again;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005558 }
5559
5560 ret = splice_to_pipe(pipe, &spd);
Eric Dumazet047fe362012-06-12 15:24:40 +02005561 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02005562out:
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005563 mutex_unlock(&trace_types_lock);
5564
Steven Rostedt2cadf912008-12-01 22:20:19 -05005565 return ret;
5566}
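/*
 * Usage sketch (userspace, illustrative): ring buffer pages can be
 * spliced out of trace_pipe_raw without copying, e.g.:
 *
 *	int fd = open("per_cpu/cpu0/trace_pipe_raw", O_RDONLY);
 *	splice(fd, NULL, pipefd[1], NULL, 4096, SPLICE_F_MOVE);
 */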
5567
5568static const struct file_operations tracing_buffers_fops = {
5569 .open = tracing_buffers_open,
5570 .read = tracing_buffers_read,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005571 .poll = tracing_buffers_poll,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005572 .release = tracing_buffers_release,
5573 .splice_read = tracing_buffers_splice_read,
5574 .llseek = no_llseek,
5575};
5576
Steven Rostedtc8d77182009-04-29 18:03:45 -04005577static ssize_t
5578tracing_stats_read(struct file *filp, char __user *ubuf,
5579 size_t count, loff_t *ppos)
5580{
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005581 struct inode *inode = file_inode(filp);
5582 struct trace_array *tr = inode->i_private;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005583 struct trace_buffer *trace_buf = &tr->trace_buffer;
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005584 int cpu = tracing_get_cpu(inode);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005585 struct trace_seq *s;
5586 unsigned long cnt;
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005587 unsigned long long t;
5588 unsigned long usec_rem;
Steven Rostedtc8d77182009-04-29 18:03:45 -04005589
Li Zefane4f2d102009-06-15 10:57:28 +08005590 s = kmalloc(sizeof(*s), GFP_KERNEL);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005591 if (!s)
Roel Kluina6463652009-11-11 22:26:35 +01005592 return -ENOMEM;
Steven Rostedtc8d77182009-04-29 18:03:45 -04005593
5594 trace_seq_init(s);
5595
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005596 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005597 trace_seq_printf(s, "entries: %ld\n", cnt);
5598
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005599 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005600 trace_seq_printf(s, "overrun: %ld\n", cnt);
5601
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005602 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005603 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5604
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005605 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005606 trace_seq_printf(s, "bytes: %ld\n", cnt);
5607
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09005608 if (trace_clocks[tr->clock_id].in_ns) {
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005609 /* local or global for trace_clock */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005610 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005611 usec_rem = do_div(t, USEC_PER_SEC);
5612 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5613 t, usec_rem);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005614
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005615 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005616 usec_rem = do_div(t, USEC_PER_SEC);
5617 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5618 } else {
5619 /* counter or tsc mode for trace_clock */
5620 trace_seq_printf(s, "oldest event ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005621 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005622
5623 trace_seq_printf(s, "now ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005624 ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005625 }
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005626
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005627 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
Slava Pestov884bfe82011-07-15 14:23:58 -07005628 trace_seq_printf(s, "dropped events: %ld\n", cnt);
5629
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005630 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
Steven Rostedt (Red Hat)ad964702013-01-29 17:45:49 -05005631 trace_seq_printf(s, "read events: %ld\n", cnt);
5632
Steven Rostedtc8d77182009-04-29 18:03:45 -04005633 count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
5634
5635 kfree(s);
5636
5637 return count;
5638}
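/*
 * Example "per_cpu/cpuN/stats" output (values are illustrative):
 *
 *	entries: 129
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 5632
 *	oldest event ts:  2725.005794
 *	now ts:  2726.732301
 *	dropped events: 0
 *	read events: 42
 */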
5639
5640static const struct file_operations tracing_stats_fops = {
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005641 .open = tracing_open_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04005642 .read = tracing_stats_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005643 .llseek = generic_file_llseek,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005644 .release = tracing_release_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04005645};
5646
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005647#ifdef CONFIG_DYNAMIC_FTRACE
5648
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005649int __weak ftrace_arch_read_dyn_info(char *buf, int size)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005650{
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005651 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005652}
5653
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005654static ssize_t
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005655tracing_read_dyn_info(struct file *filp, char __user *ubuf,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005656 size_t cnt, loff_t *ppos)
5657{
Steven Rostedta26a2a22008-10-31 00:03:22 -04005658 static char ftrace_dyn_info_buffer[1024];
5659 static DEFINE_MUTEX(dyn_info_mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005660 unsigned long *p = filp->private_data;
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005661 char *buf = ftrace_dyn_info_buffer;
Steven Rostedta26a2a22008-10-31 00:03:22 -04005662 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005663 int r;
5664
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005665 mutex_lock(&dyn_info_mutex);
5666 r = sprintf(buf, "%ld ", *p);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005667
Steven Rostedta26a2a22008-10-31 00:03:22 -04005668 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005669 buf[r++] = '\n';
5670
5671 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5672
5673 mutex_unlock(&dyn_info_mutex);
5674
5675 return r;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005676}
5677
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005678static const struct file_operations tracing_dyn_info_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005679 .open = tracing_open_generic,
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005680 .read = tracing_read_dyn_info,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005681 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005682};
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005683#endif /* CONFIG_DYNAMIC_FTRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005684
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005685#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5686static void
5687ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005688{
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005689 tracing_snapshot();
5690}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005691
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005692static void
5693ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5694{
5695 unsigned long *count = (long *)data;
5696
5697 if (!*count)
5698 return;
5699
5700 if (*count != -1)
5701 (*count)--;
5702
5703 tracing_snapshot();
5704}
5705
5706static int
5707ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5708 struct ftrace_probe_ops *ops, void *data)
5709{
5710 long count = (long)data;
5711
5712 seq_printf(m, "%ps:", (void *)ip);
5713
5714	seq_puts(m, "snapshot");
5715
5716 if (count == -1)
5717		seq_puts(m, ":unlimited\n");
5718 else
5719 seq_printf(m, ":count=%ld\n", count);
5720
5721 return 0;
5722}
5723
5724static struct ftrace_probe_ops snapshot_probe_ops = {
5725 .func = ftrace_snapshot,
5726 .print = ftrace_snapshot_print,
5727};
5728
5729static struct ftrace_probe_ops snapshot_count_probe_ops = {
5730 .func = ftrace_count_snapshot,
5731 .print = ftrace_snapshot_print,
5732};
5733
5734static int
5735ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
5736 char *glob, char *cmd, char *param, int enable)
5737{
5738 struct ftrace_probe_ops *ops;
5739 void *count = (void *)-1;
5740 char *number;
5741 int ret;
5742
5743 /* hash funcs only work with set_ftrace_filter */
5744 if (!enable)
5745 return -EINVAL;
5746
5747 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
5748
5749 if (glob[0] == '!') {
5750 unregister_ftrace_function_probe_func(glob+1, ops);
5751 return 0;
5752 }
5753
5754 if (!param)
5755 goto out_reg;
5756
5757 number = strsep(&param, ":");
5758
5759 if (!strlen(number))
5760 goto out_reg;
5761
5762 /*
5763 * We use the callback data field (which is a pointer)
5764 * as our counter.
5765 */
5766 ret = kstrtoul(number, 0, (unsigned long *)&count);
5767 if (ret)
5768 return ret;
5769
5770 out_reg:
5771 ret = register_ftrace_function_probe(glob, ops, count);
5772
5773 if (ret >= 0)
5774 alloc_snapshot(&global_trace);
5775
5776 return ret < 0 ? ret : 0;
5777}
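/*
 * Usage sketch: the "snapshot" function command is written into
 * set_ftrace_filter, optionally with a trigger count ("do_fault" is
 * just an example function):
 *
 *	echo 'do_fault:snapshot:5' > set_ftrace_filter
 *	echo '!do_fault:snapshot' > set_ftrace_filter	# remove it
 */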
5778
5779static struct ftrace_func_command ftrace_snapshot_cmd = {
5780 .name = "snapshot",
5781 .func = ftrace_trace_snapshot_callback,
5782};
5783
Tom Zanussi38de93a2013-10-24 08:34:18 -05005784static __init int register_snapshot_cmd(void)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005785{
5786 return register_ftrace_command(&ftrace_snapshot_cmd);
5787}
5788#else
Tom Zanussi38de93a2013-10-24 08:34:18 -05005789static inline __init int register_snapshot_cmd(void) { return 0; }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005790#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005791
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005792struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005793{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005794 if (tr->dir)
5795 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005796
Frederic Weisbecker3e1f60b2009-03-22 23:10:45 +01005797 if (!debugfs_initialized())
5798 return NULL;
5799
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005800 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
5801 tr->dir = debugfs_create_dir("tracing", NULL);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005802
zhangwei(Jovi)687c8782013-03-11 15:13:29 +08005803 if (!tr->dir)
5804 pr_warn_once("Could not create debugfs directory 'tracing'\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005805
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005806 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005807}
5808
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005809struct dentry *tracing_init_dentry(void)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005810{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005811 return tracing_init_dentry_tr(&global_trace);
5812}
5813
5814static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
5815{
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005816 struct dentry *d_tracer;
5817
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005818 if (tr->percpu_dir)
5819 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005820
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005821 d_tracer = tracing_init_dentry_tr(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005822 if (!d_tracer)
5823 return NULL;
5824
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005825 tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005826
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005827 WARN_ONCE(!tr->percpu_dir,
5828 "Could not create debugfs directory 'per_cpu/%d'\n", cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005829
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005830 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005831}
5832
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005833static struct dentry *
5834trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
5835 void *data, long cpu, const struct file_operations *fops)
5836{
5837 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
5838
5839 if (ret) /* See tracing_get_cpu() */
5840 ret->d_inode->i_cdev = (void *)(cpu + 1);
5841 return ret;
5842}
5843
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005844static void
5845tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005846{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005847 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005848 struct dentry *d_cpu;
Steven Rostedtdd49a382010-10-20 21:51:26 -04005849 char cpu_dir[30]; /* 30 characters should be more than enough */
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005850
Namhyung Kim0a3d7ce2012-04-23 10:11:57 +09005851 if (!d_percpu)
5852 return;
5853
Steven Rostedtdd49a382010-10-20 21:51:26 -04005854 snprintf(cpu_dir, 30, "cpu%ld", cpu);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01005855 d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
5856 if (!d_cpu) {
5857 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
5858 return;
5859 }
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005860
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01005861 /* per cpu trace_pipe */
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005862 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
Oleg Nesterov15544202013-07-23 17:25:57 +02005863 tr, cpu, &tracing_pipe_fops);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005864
5865 /* per cpu trace */
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005866 trace_create_cpu_file("trace", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02005867 tr, cpu, &tracing_fops);
Steven Rostedt7f96f932009-03-13 00:37:42 -04005868
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005869 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005870 tr, cpu, &tracing_buffers_fops);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005871
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005872 trace_create_cpu_file("stats", 0444, d_cpu,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005873 tr, cpu, &tracing_stats_fops);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005874
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005875 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005876 tr, cpu, &tracing_entries_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005877
5878#ifdef CONFIG_TRACER_SNAPSHOT
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005879 trace_create_cpu_file("snapshot", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02005880 tr, cpu, &snapshot_fops);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005881
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005882 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005883 tr, cpu, &snapshot_raw_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005884#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005885}
5886
Steven Rostedt60a11772008-05-12 21:20:44 +02005887#ifdef CONFIG_FTRACE_SELFTEST
5888/* Let selftest have access to static functions in this file */
5889#include "trace_selftest.c"
5890#endif
5891
Steven Rostedt577b7852009-02-26 23:43:05 -05005892struct trace_option_dentry {
5893 struct tracer_opt *opt;
5894 struct tracer_flags *flags;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005895 struct trace_array *tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05005896 struct dentry *entry;
5897};
5898
5899static ssize_t
5900trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
5901 loff_t *ppos)
5902{
5903 struct trace_option_dentry *topt = filp->private_data;
5904 char *buf;
5905
5906 if (topt->flags->val & topt->opt->bit)
5907 buf = "1\n";
5908 else
5909 buf = "0\n";
5910
5911 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5912}
5913
5914static ssize_t
5915trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
5916 loff_t *ppos)
5917{
5918 struct trace_option_dentry *topt = filp->private_data;
5919 unsigned long val;
Steven Rostedt577b7852009-02-26 23:43:05 -05005920 int ret;
5921
Peter Huewe22fe9b52011-06-07 21:58:27 +02005922 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5923 if (ret)
Steven Rostedt577b7852009-02-26 23:43:05 -05005924 return ret;
5925
Li Zefan8d18eaa2009-12-08 11:17:06 +08005926 if (val != 0 && val != 1)
Steven Rostedt577b7852009-02-26 23:43:05 -05005927 return -EINVAL;
Li Zefan8d18eaa2009-12-08 11:17:06 +08005928
5929 if (!!(topt->flags->val & topt->opt->bit) != val) {
5930 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05005931 ret = __set_tracer_option(topt->tr, topt->flags,
Steven Rostedtc757bea2009-12-21 22:35:16 -05005932 topt->opt, !val);
Li Zefan8d18eaa2009-12-08 11:17:06 +08005933 mutex_unlock(&trace_types_lock);
5934 if (ret)
5935 return ret;
Steven Rostedt577b7852009-02-26 23:43:05 -05005936 }
5937
5938 *ppos += cnt;
5939
5940 return cnt;
5941}
5942
5943
5944static const struct file_operations trace_options_fops = {
5945 .open = tracing_open_generic,
5946 .read = trace_options_read,
5947 .write = trace_options_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005948 .llseek = generic_file_llseek,
Steven Rostedt577b7852009-02-26 23:43:05 -05005949};
5950
Steven Rostedta8259072009-02-26 22:19:12 -05005951static ssize_t
5952trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
5953 loff_t *ppos)
5954{
5955 long index = (long)filp->private_data;
5956 char *buf;
5957
5958 if (trace_flags & (1 << index))
5959 buf = "1\n";
5960 else
5961 buf = "0\n";
5962
5963 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5964}
5965
5966static ssize_t
5967trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
5968 loff_t *ppos)
5969{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005970 struct trace_array *tr = &global_trace;
Steven Rostedta8259072009-02-26 22:19:12 -05005971 long index = (long)filp->private_data;
Steven Rostedta8259072009-02-26 22:19:12 -05005972 unsigned long val;
5973 int ret;
5974
Peter Huewe22fe9b52011-06-07 21:58:27 +02005975 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5976 if (ret)
Steven Rostedta8259072009-02-26 22:19:12 -05005977 return ret;
5978
Zhaoleif2d84b62009-08-07 18:55:48 +08005979 if (val != 0 && val != 1)
Steven Rostedta8259072009-02-26 22:19:12 -05005980 return -EINVAL;
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04005981
5982 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005983 ret = set_tracer_flag(tr, 1 << index, val);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04005984 mutex_unlock(&trace_types_lock);
Steven Rostedta8259072009-02-26 22:19:12 -05005985
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005986 if (ret < 0)
5987 return ret;
5988
Steven Rostedta8259072009-02-26 22:19:12 -05005989 *ppos += cnt;
5990
5991 return cnt;
5992}
5993
Steven Rostedta8259072009-02-26 22:19:12 -05005994static const struct file_operations trace_options_core_fops = {
5995 .open = tracing_open_generic,
5996 .read = trace_options_core_read,
5997 .write = trace_options_core_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005998 .llseek = generic_file_llseek,
Steven Rostedta8259072009-02-26 22:19:12 -05005999};
6000
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006001struct dentry *trace_create_file(const char *name,
Al Virof4ae40a2011-07-24 04:33:43 -04006002 umode_t mode,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006003 struct dentry *parent,
6004 void *data,
6005 const struct file_operations *fops)
6006{
6007 struct dentry *ret;
6008
6009 ret = debugfs_create_file(name, mode, parent, data, fops);
6010 if (!ret)
6011 pr_warning("Could not create debugfs '%s' entry\n", name);
6012
6013 return ret;
6014}
6015
6016
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006017static struct dentry *trace_options_init_dentry(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05006018{
6019 struct dentry *d_tracer;
Steven Rostedta8259072009-02-26 22:19:12 -05006020
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006021 if (tr->options)
6022 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05006023
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006024 d_tracer = tracing_init_dentry_tr(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05006025 if (!d_tracer)
6026 return NULL;
6027
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006028 tr->options = debugfs_create_dir("options", d_tracer);
6029 if (!tr->options) {
Steven Rostedta8259072009-02-26 22:19:12 -05006030 pr_warning("Could not create debugfs directory 'options'\n");
6031 return NULL;
6032 }
6033
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006034 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05006035}
6036
Steven Rostedt577b7852009-02-26 23:43:05 -05006037static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006038create_trace_option_file(struct trace_array *tr,
6039 struct trace_option_dentry *topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05006040 struct tracer_flags *flags,
6041 struct tracer_opt *opt)
6042{
6043 struct dentry *t_options;
Steven Rostedt577b7852009-02-26 23:43:05 -05006044
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006045 t_options = trace_options_init_dentry(tr);
Steven Rostedt577b7852009-02-26 23:43:05 -05006046 if (!t_options)
6047 return;
6048
6049 topt->flags = flags;
6050 topt->opt = opt;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006051 topt->tr = tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05006052
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006053 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05006054 &trace_options_fops);
6055
Steven Rostedt577b7852009-02-26 23:43:05 -05006056}
6057
6058static struct trace_option_dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006059create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
Steven Rostedt577b7852009-02-26 23:43:05 -05006060{
6061 struct trace_option_dentry *topts;
6062 struct tracer_flags *flags;
6063 struct tracer_opt *opts;
6064 int cnt;
6065
6066 if (!tracer)
6067 return NULL;
6068
6069 flags = tracer->flags;
6070
6071 if (!flags || !flags->opts)
6072 return NULL;
6073
6074 opts = flags->opts;
6075
6076 for (cnt = 0; opts[cnt].name; cnt++)
6077 ;
6078
Steven Rostedt0cfe8242009-02-27 10:51:10 -05006079 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
Steven Rostedt577b7852009-02-26 23:43:05 -05006080 if (!topts)
6081 return NULL;
6082
6083 for (cnt = 0; opts[cnt].name; cnt++)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006084 create_trace_option_file(tr, &topts[cnt], flags,
Steven Rostedt577b7852009-02-26 23:43:05 -05006085 &opts[cnt]);
6086
6087 return topts;
6088}
6089
6090static void
6091destroy_trace_option_files(struct trace_option_dentry *topts)
6092{
6093 int cnt;
6094
6095 if (!topts)
6096 return;
6097
6098 for (cnt = 0; topts[cnt].opt; cnt++) {
6099 if (topts[cnt].entry)
6100 debugfs_remove(topts[cnt].entry);
6101 }
6102
6103 kfree(topts);
6104}
6105
static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options, (void *)index,
				 &trace_options_core_fops);
}

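/* Create one file per core trace option under the "options" directory. */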
static __init void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++)
		create_trace_option_core_file(tr, trace_options[i], i);
}

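/* Read handler for "tracing_on": reports whether the ring buffer is enabled. */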
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

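/*
 * Write handler for "tracing_on": turns the ring buffer on or off and
 * calls the current tracer's start/stop callbacks accordingly.
 */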
static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

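	/* Advance the file position so userspace sees the write consumed. */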
	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic_tr,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};

struct dentry *trace_instance_dir;

static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);

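/* Allocate the ring buffer and per-cpu data for a single trace buffer. */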
static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}

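/*
 * Allocate the main trace buffer for a trace array and, when max-trace
 * support is built in, its snapshot (max) buffer as well.
 */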
static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		ring_buffer_free(tr->trace_buffer.buffer);
		free_percpu(tr->trace_buffer.data);
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}

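/* Free one trace buffer's ring buffer and per-cpu data, if allocated. */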
static void free_trace_buffer(struct trace_buffer *buf)
{
	if (buf->buffer) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		free_percpu(buf->data);
		buf->data = NULL;
	}
}

static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}

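/*
 * Create a new trace array instance: allocate it, set up its buffers
 * and debugfs directory, and add it to ftrace_trace_arrays.  Fails
 * with -EEXIST if an instance of the same name already exists.
 */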
static int new_instance_create(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = debugfs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		debugfs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	init_tracer_debugfs(tr, tr->dir);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

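/*
 * Tear down a named trace array instance.  Fails with -ENODEV if the
 * name is unknown and -EBUSY if the instance still has references.
 */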
static int instance_delete(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	ret = -EBUSY;
	if (tr->ref)
		goto out_unlock;

	list_del(&tr->list);

	tracing_set_nop(tr);
	event_trace_del_tracer(tr);
	ftrace_destroy_function_files(tr);
	debugfs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

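/* Called when userspace does a mkdir inside the "instances" directory. */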
static int instance_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/*
	 * The inode mutex is locked, but debugfs_create_dir() will also
	 * take the mutex. As the instances directory can not be destroyed
	 * or changed in any other way, it is safe to unlock it, and
	 * let the dentry try. If two users try to make the same dir at
	 * the same time, then new_instance_create() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = new_instance_create(dentry->d_iname);

	mutex_lock(&inode->i_mutex);

	return ret;
}

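/* Called when userspace does an rmdir on an instance directory. */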
static int instance_rmdir(struct inode *inode, struct dentry *dentry)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/* The caller did a dget() on dentry */
	mutex_unlock(&dentry->d_inode->i_mutex);

	/*
	 * The inode mutex is locked, but removing the directory will
	 * also take the mutex. As the instances directory can not be
	 * destroyed or changed in any other way, it is safe to unlock
	 * it, and let the dentry try. If two users try to remove the
	 * same dir at the same time, then instance_delete() will
	 * determine the winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = instance_delete(dentry->d_iname);

	mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
	mutex_lock(&dentry->d_inode->i_mutex);

	return ret;
}

static const struct inode_operations instance_dir_inode_operations = {
	.lookup		= simple_lookup,
	.mkdir		= instance_mkdir,
	.rmdir		= instance_rmdir,
};

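/*
 * Create the "instances" directory.  New trace arrays are made and
 * destroyed by mkdir/rmdir inside it, e.g. (assuming the usual
 * debugfs mount point):
 *
 *	mkdir /sys/kernel/debug/tracing/instances/foo
 *	rmdir /sys/kernel/debug/tracing/instances/foo
 */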
static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = debugfs_create_dir("instances", d_tracer);
	if (WARN_ON(!trace_instance_dir))
		return;

	/* Hijack the dir inode operations, to allow mkdir */
	trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
}

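/*
 * Create the debugfs files that belong to one trace array (trace,
 * trace_pipe, buffer_size_kb, tracing_on, etc.) plus its per-cpu
 * directories.
 */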
static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			  &tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_debugfs_percpu(tr, cpu);
}

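/*
 * Set up the global tracing debugfs tree: the global trace array's
 * files, the top-level files, the options directory, and the
 * "instances" directory.
 */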
static __init int tracer_init_debugfs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	init_tracer_debugfs(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			  &tracing_thresh, &tracing_max_lat_fops);

	trace_create_file("README", 0444, d_tracer,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			  NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			  &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	create_trace_options_dir(&global_trace);

	return 0;
}

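/* Dump the ring buffers to the console on panic, if ftrace_dump_on_oops is set. */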
static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call	= trace_panic_handler,
	.next		= NULL,
	.priority	= 150	/* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};

/*
 * printk is limited to a maximum of 1024 bytes; we really don't need
 * it that big.  Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define KERN_TRACE here so that we have one place to modify it if we
 * decide to change what log level the ftrace dump should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->len >= TRACE_MAX_PRINT)
		s->len = TRACE_MAX_PRINT;

	/* Should be nul-terminated, but we are paranoid. */
	s->buffer[s->len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

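/*
 * Initialize a trace_iterator to walk the global trace buffer; used by
 * dump paths such as ftrace_dump() below.
 */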
void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}

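/*
 * ftrace_dump - dump the trace buffers to the console
 * @oops_dump_mode: DUMP_ALL to dump every CPU's buffer, DUMP_ORIG to
 *	dump only the CPU that triggered the dump, DUMP_NONE to skip it.
 *
 * Called from the panic and die notifiers (and sysrq-z) to print the
 * contents of the ring buffers before the machine goes down.
 */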
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
	}

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUs to read the
	 * next buffer. This is a bit expensive, but is not done
	 * often. We read all that we can, and then release the
	 * locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);

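/*
 * Early boot setup: allocate the cpumasks, trace_printk buffers, and
 * the global trace array's ring buffers, then register the nop tracer
 * and the panic/die notifiers.
 */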
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* Used for event triggers */
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_free_cpumask;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUs */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warning("Trace clock %s not defined, going back to default\n",
				   trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	while (trace_boot_options) {
		char *option;

		option = strsep(&trace_boot_options, ",");
		trace_set_options(&global_trace, option);
	}

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The default bootup tracer name points into an init section,
	 * which is freed after boot. This function is called at
	 * late_initcall time; if the boot tracer was not registered
	 * by then, clear the pointer to prevent later registration
	 * from accessing memory that is about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);