/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest peeks into the ring buffer to count the entries
 * inserted during the selftest, but concurrent insertions into
 * the ring buffer (such as trace_printk) could occur at the
 * same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is default off, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops.
 * Set it to 1 to dump the buffers of all CPUs, or to 2 to dump
 * only the buffer of the CPU that triggered the oops.
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning=", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

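/*
 * Illustrative sketch, added for exposition (not part of the kernel
 * source): the __setup() handlers above wire these options to the
 * kernel command line, so a boot line such as
 *
 *   ftrace=function_graph trace_options=sym-addr trace_clock=global \
 *   alloc_snapshot ftrace_dump_on_oops
 *
 * would start the function_graph tracer at boot, set a default trace
 * option and clock, pre-allocate the snapshot buffer, and dump every
 * CPU's buffer to the console on an oops.
 */
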
unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of each page in memory is used to hold the
 * linked list, by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array global_trace;

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}

int filter_check_discard(struct ftrace_event_file *file, void *rec,
			 struct ring_buffer *buffer,
			 struct ring_buffer_event *event)
{
	if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(file->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(filter_check_discard);

int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(call_filter_check_discard);

static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low 16384 so that, if a
 * dump on oops happens, you do not have to wait for a huge
 * amount of output. It is configurable at both boot time and
 * run time anyway.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * Serialize access to the ring buffer.
 *
 * The ring buffer serializes readers, but that is only low level
 * protection. The validity of the events (as returned by
 * ring_buffer_peek() etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other
 * processes to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will
 *      be rewritten by the events producer.
 *   B) the page of the consumed events may become a page for
 *      splice_read, and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different
 * cpu ring buffers concurrently.
 *
 * They do not distinguish read-only from read-consume access;
 * multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

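/*
 * Illustrative sketch, added for exposition (not part of the kernel
 * source): how a reader path uses the primitives above. A per-cpu
 * consumer takes the lock for its own cpu, which blocks a
 * RING_BUFFER_ALL_CPUS reader but lets readers of other cpus proceed
 * in parallel. example_consume_cpu() is hypothetical.
 */
#if 0
static void example_consume_cpu(int cpu)
{
	trace_access_lock(cpu);
	/* ... peek at or consume events of the @cpu ring buffer ... */
	trace_access_unlock(cpu);
}
#endif
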
/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;

static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:    The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, preempt_count());
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:    The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, preempt_count());
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

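/*
 * Note the asymmetry between the two helpers above: __trace_puts()
 * copies the string into the ring buffer, while __trace_bputs() records
 * only the pointer, so the latter is safe solely for strings with
 * static storage duration. The trace_puts() macro picks between them
 * based on whether its argument is a built-in constant. Illustrative
 * sketch, added for exposition (example_annotate() is hypothetical):
 */
#if 0
static void example_annotate(int count)
{
	trace_puts("entering slow path\n");	/* constant: pointer-only fast path */
	trace_printk("count = %d\n", count);	/* formatted variant */
}
#endif
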
#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing,
 * basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer. Instead, we resize it, because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

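/*
 * Illustrative sketch, added for exposition (not part of the kernel
 * source): how a debugging module might combine the snapshot calls
 * above. example_init(), hot_path_hook() and looks_wrong() are
 * hypothetical.
 */
#if 0
static int __init example_init(void)
{
	/* May sleep, so allocate up front from process context. */
	return tracing_alloc_snapshot();
}

static void hot_path_hook(void *ctx)
{
	/* Safe even in atomic context once the buffer is preallocated. */
	if (looks_wrong(ctx))
		tracing_snapshot();
}
#endif
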
static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

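/*
 * Illustrative sketch, added for exposition (not part of the kernel
 * source): tracing_off() above is handy for freezing the ring buffers
 * the moment a bug is detected, so the events leading up to it stay
 * available in the tracefs "trace" file. example_check(),
 * struct example_data and EXAMPLE_MAGIC are hypothetical.
 */
#if 0
static void example_check(struct example_data *data)
{
	if (data->magic != EXAMPLE_MAGIC) {
		tracing_off();		/* preserve the evidence */
		WARN_ON_ONCE(1);
	}
}
#endif
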
static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	"record-cmd",
	"overwrite",
	"disable_on_free",
	"irq-info",
	"markers",
	"function-trace",
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,	"local",	1 },
	{ trace_clock_global,	"global",	1 },
	{ trace_clock_counter,	"counter",	0 },
	{ trace_clock_jiffies,	"uptime",	1 },
	{ trace_clock,		"perf",		1 },
	ARCH_TRACE_CLOCKS
};

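/*
 * Illustrative example, added for exposition (not part of the kernel
 * source): the trace_clocks[] table above backs the tracefs
 * "trace_clock" file, so from user space:
 *
 *   # cat /sys/kernel/debug/tracing/trace_clock
 *   [local] global counter uptime perf
 *   # echo global > /sys/kernel/debug/tracing/trace_clock
 *
 * "local" is the fastest but is not guaranteed monotonic across CPUs;
 * "global" is ordered across CPUs at extra cost; "counter" is a
 * strictly ordered count, not a time stamp at all.
 */
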
/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}

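/*
 * Illustrative sketch, added for exposition (not part of the kernel
 * source): the usual calling pattern for trace_get_user() in a tracefs
 * write handler. example_write() and example_handle_word() are
 * hypothetical; trace_parser_loaded() is declared in
 * kernel/trace/trace.h.
 */
#if 0
static ssize_t example_write(struct file *filp, const char __user *ubuf,
			     size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read;

	if (trace_parser_get_init(&parser, PAGE_SIZE))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);
	if (read >= 0 && trace_parser_loaded(&parser))
		example_handle_word(parser.buffer);

	trace_parser_put(&parser);
	return read;
}
#endif
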
ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
	int len;
	int ret;

	if (!cnt)
		return 0;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
	if (ret == cnt)
		return -EFAULT;

	cnt -= ret;

	s->readpos += cnt;
	return cnt;
}

static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->readpos, cnt);

	s->readpos += cnt;
	return cnt;
}

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (This way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace.)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */
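
/*
 * Note: update_max_tr() above swaps the entire live buffer with the max
 * buffer, while update_max_tr_single() swaps only one CPU's buffer.
 * Illustrative sketch, added for exposition (not part of the kernel
 * source), of how a latency tracer records a new worst case;
 * example_report_latency() is hypothetical and must be called with
 * interrupts disabled:
 */
#if 0
static void example_report_latency(struct trace_array *tr, int cpu,
				   unsigned long delta)
{
	if (delta > tr->max_latency) {
		tr->max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}
}
#endif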

static void wait_on_pipe(struct trace_iterator *iter)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return;

	ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}

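/*
 * Illustrative sketch, added for exposition (not part of the kernel
 * source): the smallest possible client of register_tracer(). In-tree
 * tracers (see kernel/trace/trace_nop.c for a minimal example) also
 * provide reset/start/stop callbacks.
 */
#if 0
static int example_tracer_init(struct trace_array *tr)
{
	return 0;
}

static struct tracer example_tracer __read_mostly = {
	.name	= "example",
	.init	= example_tracer_init,
};

static int __init register_example_tracer(void)
{
	return register_tracer(&example_tracer);
}
core_initcall(register_example_tracer);
#endif
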
void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}

#define SAVED_CMDLINES 128
#define NO_CMDLINE_MAP UINT_MAX
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;

/* temporarily disable recording */
Hannes Eder4fd27352009-02-10 19:44:12 +01001297static atomic_t trace_record_cmdline_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001298
1299static void trace_init_cmdlines(void)
1300{
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001301 memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
1302 memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001303 cmdline_idx = 0;
1304}
1305
Carsten Emdeb5130b12009-09-13 01:43:07 +02001306int is_tracing_stopped(void)
1307{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001308 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02001309}
1310
Steven Rostedt0f048702008-11-05 16:05:44 -05001311/**
1312 * tracing_start - quick start of the tracer
1313 *
1314 * If tracing is enabled but was stopped by tracing_stop,
1315 * this will start the tracer back up.
1316 */
1317void tracing_start(void)
1318{
1319 struct ring_buffer *buffer;
1320 unsigned long flags;
1321
1322 if (tracing_disabled)
1323 return;
1324
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001325 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1326 if (--global_trace.stop_count) {
1327 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05001328 /* Someone screwed up their debugging */
1329 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001330 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05001331 }
Steven Rostedt0f048702008-11-05 16:05:44 -05001332 goto out;
1333 }
1334
Steven Rostedta2f80712010-03-12 19:56:00 -05001335 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001336 arch_spin_lock(&global_trace.max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05001337
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001338 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001339 if (buffer)
1340 ring_buffer_record_enable(buffer);
1341
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001342#ifdef CONFIG_TRACER_MAX_TRACE
1343 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001344 if (buffer)
1345 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001346#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001347
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001348 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001349
Steven Rostedt0f048702008-11-05 16:05:44 -05001350 ftrace_start();
1351 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001352 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1353}
1354
1355static void tracing_start_tr(struct trace_array *tr)
1356{
1357 struct ring_buffer *buffer;
1358 unsigned long flags;
1359
1360 if (tracing_disabled)
1361 return;
1362
1363 /* If global, we need to also start the max tracer */
1364 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1365 return tracing_start();
1366
1367 raw_spin_lock_irqsave(&tr->start_lock, flags);
1368
1369 if (--tr->stop_count) {
1370 if (tr->stop_count < 0) {
1371 /* Someone screwed up their debugging */
1372 WARN_ON_ONCE(1);
1373 tr->stop_count = 0;
1374 }
1375 goto out;
1376 }
1377
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001378 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001379 if (buffer)
1380 ring_buffer_record_enable(buffer);
1381
1382 out:
1383 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001384}
1385
1386/**
1387 * tracing_stop - quick stop of the tracer
1388 *
1389 * Lightweight way to stop tracing. Use in conjunction with
1390 * tracing_start.
1391 */
1392void tracing_stop(void)
1393{
1394 struct ring_buffer *buffer;
1395 unsigned long flags;
1396
1397 ftrace_stop();
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001398 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1399 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05001400 goto out;
1401
Steven Rostedta2f80712010-03-12 19:56:00 -05001402 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001403 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001404
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001405 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001406 if (buffer)
1407 ring_buffer_record_disable(buffer);
1408
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001409#ifdef CONFIG_TRACER_MAX_TRACE
1410 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001411 if (buffer)
1412 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001413#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001414
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001415 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001416
Steven Rostedt0f048702008-11-05 16:05:44 -05001417 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001418 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1419}
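/*
 * Illustrative sketch (not part of this file): tracing_stop() and
 * tracing_start() nest via stop_count, so a hypothetical caller that
 * wants the buffers quiet around a critical section pairs them:
 *
 *	tracing_stop();
 *	inspect_or_copy_buffers();	(hypothetical helper)
 *	tracing_start();
 *
 * Every tracing_stop() must be balanced by a tracing_start(); the
 * counter keeps nested users from re-enabling recording early.
 */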
1420
1421static void tracing_stop_tr(struct trace_array *tr)
1422{
1423 struct ring_buffer *buffer;
1424 unsigned long flags;
1425
1426 /* If global, we need to also stop the max tracer */
1427 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1428 return tracing_stop();
1429
1430 raw_spin_lock_irqsave(&tr->start_lock, flags);
1431 if (tr->stop_count++)
1432 goto out;
1433
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001434 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001435 if (buffer)
1436 ring_buffer_record_disable(buffer);
1437
1438 out:
1439 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001440}
1441
Ingo Molnare309b412008-05-12 21:20:51 +02001442void trace_stop_cmdline_recording(void);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001443
Ingo Molnare309b412008-05-12 21:20:51 +02001444static void trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001445{
Carsten Emdea635cf02009-03-18 09:00:41 +01001446 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001447
1448 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1449 return;
1450
1451 /*
1452 * It's not the end of the world if we don't get
1453 * the lock, but we also don't want to spin
1454 * nor do we want to disable interrupts,
1455 * so if we miss here, then better luck next time.
1456 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001457 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001458 return;
1459
1460 idx = map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001461 if (idx == NO_CMDLINE_MAP) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001462 idx = (cmdline_idx + 1) % SAVED_CMDLINES;
1463
Carsten Emdea635cf02009-03-18 09:00:41 +01001464 /*
1465 * Check whether the cmdline buffer at idx has a pid
1466 * mapped. We are going to overwrite that entry so we
1467 * need to clear the map_pid_to_cmdline. Otherwise we
1468 * would read the new comm for the old pid.
1469 */
1470 pid = map_cmdline_to_pid[idx];
1471 if (pid != NO_CMDLINE_MAP)
1472 map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001473
Carsten Emdea635cf02009-03-18 09:00:41 +01001474 map_cmdline_to_pid[idx] = tsk->pid;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001475 map_pid_to_cmdline[tsk->pid] = idx;
1476
1477 cmdline_idx = idx;
1478 }
1479
1480 memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
1481
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001482 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001483}
1484
Steven Rostedt4ca53082009-03-16 19:20:15 -04001485void trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001486{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001487 unsigned map;
1488
Steven Rostedt4ca53082009-03-16 19:20:15 -04001489 if (!pid) {
1490 strcpy(comm, "<idle>");
1491 return;
1492 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001493
Steven Rostedt74bf4072010-01-25 15:11:53 -05001494 if (WARN_ON_ONCE(pid < 0)) {
1495 strcpy(comm, "<XXX>");
1496 return;
1497 }
1498
Steven Rostedt4ca53082009-03-16 19:20:15 -04001499 if (pid > PID_MAX_DEFAULT) {
1500 strcpy(comm, "<...>");
1501 return;
1502 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001503
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001504 preempt_disable();
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001505 arch_spin_lock(&trace_cmdline_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001506 map = map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001507 if (map != NO_CMDLINE_MAP)
1508 strcpy(comm, saved_cmdlines[map]);
1509 else
1510 strcpy(comm, "<...>");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001511
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001512 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001513 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001514}
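/*
 * Sketch of the cmdline cache round trip (illustrative only): the two
 * arrays above form a small ring indexed both ways, so a recorded pid
 * can be resolved back to a comm even after the task exits:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	trace_save_cmdline(current);	(normally via tracing_record_cmdline)
 *	trace_find_cmdline(current->pid, comm);
 *
 * Once all SAVED_CMDLINES slots are used, the oldest slot is recycled
 * and its previous pid mapping is cleared first, so stale lookups fall
 * back to "<...>".
 */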
1515
Ingo Molnare309b412008-05-12 21:20:51 +02001516void tracing_record_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001517{
Steven Rostedt0fb96562012-05-11 14:25:30 -04001518 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001519 return;
1520
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001521 if (!__this_cpu_read(trace_cmdline_save))
1522 return;
1523
1524 __this_cpu_write(trace_cmdline_save, false);
1525
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001526 trace_save_cmdline(tsk);
1527}
1528
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03001529void
Steven Rostedt38697052008-10-01 13:14:09 -04001530tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1531 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001532{
1533 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001534
Steven Rostedt777e2082008-09-29 23:02:42 -04001535 entry->preempt_count = pc & 0xff;
1536 entry->pid = (tsk) ? tsk->pid : 0;
1537 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04001538#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04001539 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04001540#else
1541 TRACE_FLAG_IRQS_NOSUPPORT |
1542#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001543 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1544 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02001545 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1546 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001547}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02001548EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
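/*
 * Illustrative decode of the flags byte built above (a sketch, not a
 * definitive consumer): the latency-format columns map directly onto
 * these bits, e.g. a hypothetical check for an event recorded in hard
 * irq context with preemption disabled:
 *
 *	struct trace_entry *ent = ring_buffer_event_data(event);
 *
 *	if ((ent->flags & TRACE_FLAG_HARDIRQ) && ent->preempt_count)
 *		pr_debug("hardirq event, preempt_count=%u\n",
 *			 ent->preempt_count);
 */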
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001549
Steven Rostedte77405a2009-09-02 14:17:06 -04001550struct ring_buffer_event *
1551trace_buffer_lock_reserve(struct ring_buffer *buffer,
1552 int type,
1553 unsigned long len,
1554 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001555{
1556 struct ring_buffer_event *event;
1557
Steven Rostedte77405a2009-09-02 14:17:06 -04001558 event = ring_buffer_lock_reserve(buffer, len);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001559 if (event != NULL) {
1560 struct trace_entry *ent = ring_buffer_event_data(event);
1561
1562 tracing_generic_entry_update(ent, flags, pc);
1563 ent->type = type;
1564 }
1565
1566 return event;
1567}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001568
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001569void
1570__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1571{
1572 __this_cpu_write(trace_cmdline_save, true);
1573 ring_buffer_unlock_commit(buffer, event);
1574}
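/*
 * The pair above is the core write protocol: reserve space, fill the
 * payload, commit. A minimal sketch of a caller, mirroring
 * trace_function() later in this file (illustrative only):
 *
 *	struct ring_buffer_event *event;
 *	struct ftrace_entry *entry;
 *
 *	event = trace_buffer_lock_reserve(buffer, TRACE_FN,
 *					  sizeof(*entry), flags, pc);
 *	if (!event)
 *		return;		(buffer full or recording disabled)
 *	entry = ring_buffer_event_data(event);
 *	entry->ip = ip;
 *	entry->parent_ip = parent_ip;
 *	__buffer_unlock_commit(buffer, event);
 */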
1575
Steven Rostedte77405a2009-09-02 14:17:06 -04001576static inline void
1577__trace_buffer_unlock_commit(struct ring_buffer *buffer,
1578 struct ring_buffer_event *event,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001579 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001580{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001581 __buffer_unlock_commit(buffer, event);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001582
Steven Rostedte77405a2009-09-02 14:17:06 -04001583 ftrace_trace_stack(buffer, flags, 6, pc);
1584 ftrace_trace_userstack(buffer, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001585}
1586
Steven Rostedte77405a2009-09-02 14:17:06 -04001587void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1588 struct ring_buffer_event *event,
1589 unsigned long flags, int pc)
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001590{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001591 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001592}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001593EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001594
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001595static struct ring_buffer *temp_buffer;
1596
Steven Rostedtef5580d2009-02-27 19:38:04 -05001597struct ring_buffer_event *
Steven Rostedtccb469a2012-08-02 10:32:10 -04001598trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1599 struct ftrace_event_file *ftrace_file,
1600 int type, unsigned long len,
1601 unsigned long flags, int pc)
1602{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001603 struct ring_buffer_event *entry;
1604
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001605 *current_rb = ftrace_file->tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001606 entry = trace_buffer_lock_reserve(*current_rb,
Steven Rostedtccb469a2012-08-02 10:32:10 -04001607 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001608 /*
1609 * If tracing is off, but we have triggers enabled
1610 * we still need to look at the event data. Use the temp_buffer
1611 * to store the trace event for the trigger to use. It's recursion
1612 * safe and will not be recorded anywhere.
1613 */
1614 if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
1615 *current_rb = temp_buffer;
1616 entry = trace_buffer_lock_reserve(*current_rb,
1617 type, len, flags, pc);
1618 }
1619 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04001620}
1621EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1622
1623struct ring_buffer_event *
Steven Rostedte77405a2009-09-02 14:17:06 -04001624trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1625 int type, unsigned long len,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001626 unsigned long flags, int pc)
1627{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001628 *current_rb = global_trace.trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04001629 return trace_buffer_lock_reserve(*current_rb,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001630 type, len, flags, pc);
1631}
Steven Rostedt94487d62009-05-05 19:22:53 -04001632EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001633
Steven Rostedte77405a2009-09-02 14:17:06 -04001634void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1635 struct ring_buffer_event *event,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001636 unsigned long flags, int pc)
1637{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001638 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001639}
Steven Rostedt94487d62009-05-05 19:22:53 -04001640EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001641
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001642void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1643 struct ring_buffer_event *event,
1644 unsigned long flags, int pc,
1645 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001646{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001647 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001648
1649 ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1650 ftrace_trace_userstack(buffer, flags, pc);
1651}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001652EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001653
Steven Rostedte77405a2009-09-02 14:17:06 -04001654void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1655 struct ring_buffer_event *event)
Steven Rostedt77d9f462009-04-02 01:16:59 -04001656{
Steven Rostedte77405a2009-09-02 14:17:06 -04001657 ring_buffer_discard_commit(buffer, event);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001658}
Steven Rostedt12acd472009-04-17 16:01:56 -04001659EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001660
Ingo Molnare309b412008-05-12 21:20:51 +02001661void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001662trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04001663 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1664 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001665{
Tom Zanussie1112b42009-03-31 00:48:49 -05001666 struct ftrace_event_call *call = &event_function;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001667 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001668 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001669 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001670
Steven Rostedtd7690412008-10-01 00:29:53 -04001671 /* If we are reading the ring buffer, don't trace */
Rusty Russelldd17c8f2009-10-29 22:34:15 +09001672 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
Steven Rostedtd7690412008-10-01 00:29:53 -04001673 return;
1674
Steven Rostedte77405a2009-09-02 14:17:06 -04001675 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001676 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001677 if (!event)
1678 return;
1679 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04001680 entry->ip = ip;
1681 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05001682
Tom Zanussif306cc82013-10-24 08:34:17 -05001683 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001684 __buffer_unlock_commit(buffer, event);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001685}
1686
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001687#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001688
1689#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1690struct ftrace_stack {
1691 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1692};
1693
1694static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1695static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1696
Steven Rostedte77405a2009-09-02 14:17:06 -04001697static void __ftrace_trace_stack(struct ring_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05001698 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001699 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02001700{
Tom Zanussie1112b42009-03-31 00:48:49 -05001701 struct ftrace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001702 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001703 struct stack_entry *entry;
Ingo Molnar86387f72008-05-12 21:20:51 +02001704 struct stack_trace trace;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001705 int use_stack;
1706 int size = FTRACE_STACK_ENTRIES;
Ingo Molnar86387f72008-05-12 21:20:51 +02001707
1708 trace.nr_entries = 0;
Ingo Molnar86387f72008-05-12 21:20:51 +02001709 trace.skip = skip;
Ingo Molnar86387f72008-05-12 21:20:51 +02001710
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001711 /*
1712 * Since events can happen in NMIs, there's no safe way to
1713 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1714 * or NMI comes in, it will just have to use the default
1715 * FTRACE_STACK_SIZE.
1716 */
1717 preempt_disable_notrace();
1718
Shan Wei82146522012-11-19 13:21:01 +08001719 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001720 /*
1721 * We don't need any atomic variables, just a barrier.
1722 * If an interrupt comes in, we don't care, because it would
1723 * have exited and put the counter back to what we want.
1724 * We just need a barrier to keep gcc from moving things
1725 * around.
1726 */
1727 barrier();
1728 if (use_stack == 1) {
Christoph Lameterbdffd892014-04-29 14:17:40 -05001729 trace.entries = this_cpu_ptr(ftrace_stack.calls);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001730 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1731
1732 if (regs)
1733 save_stack_trace_regs(regs, &trace);
1734 else
1735 save_stack_trace(&trace);
1736
1737 if (trace.nr_entries > size)
1738 size = trace.nr_entries;
1739 } else
1740 /* From now on, use_stack is a boolean */
1741 use_stack = 0;
1742
1743 size *= sizeof(unsigned long);
1744
1745 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1746 sizeof(*entry) + size, flags, pc);
1747 if (!event)
1748 goto out;
1749 entry = ring_buffer_event_data(event);
1750
1751 memset(&entry->caller, 0, size);
1752
1753 if (use_stack)
1754 memcpy(&entry->caller, trace.entries,
1755 trace.nr_entries * sizeof(unsigned long));
1756 else {
1757 trace.max_entries = FTRACE_STACK_ENTRIES;
1758 trace.entries = entry->caller;
1759 if (regs)
1760 save_stack_trace_regs(regs, &trace);
1761 else
1762 save_stack_trace(&trace);
1763 }
1764
1765 entry->size = trace.nr_entries;
1766
Tom Zanussif306cc82013-10-24 08:34:17 -05001767 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001768 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001769
1770 out:
1771 /* Again, don't let gcc optimize things here */
1772 barrier();
Shan Wei82146522012-11-19 13:21:01 +08001773 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001774 preempt_enable_notrace();
1775
Ingo Molnarf0a920d2008-05-12 21:20:47 +02001776}
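/*
 * Worked example of the ftrace_stack_reserve trick above (illustrative):
 * the first stack dump on a CPU sees the counter go 0 -> 1 and may use
 * the large per-cpu ftrace_stack; an NMI landing in the middle of that
 * dump sees 1 -> 2 and instead saves directly into the event's own
 * caller[] array, capped at FTRACE_STACK_ENTRIES, so the two contexts
 * never scribble on the same storage.
 */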
1777
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001778void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1779 int skip, int pc, struct pt_regs *regs)
1780{
1781 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1782 return;
1783
1784 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1785}
1786
Steven Rostedte77405a2009-09-02 14:17:06 -04001787void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1788 int skip, int pc)
Steven Rostedt53614992009-01-15 19:12:40 -05001789{
1790 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1791 return;
1792
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001793 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
Steven Rostedt53614992009-01-15 19:12:40 -05001794}
1795
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001796void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1797 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04001798{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001799 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
Steven Rostedt38697052008-10-01 13:14:09 -04001800}
1801
Steven Rostedt03889382009-12-11 09:48:22 -05001802/**
1803 * trace_dump_stack - record a stack backtrace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001804 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05001805 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001806void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05001807{
1808 unsigned long flags;
1809
1810 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05001811 return;
Steven Rostedt03889382009-12-11 09:48:22 -05001812
1813 local_save_flags(flags);
1814
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001815 /*
1816 * Skip 3 more, which seems to get us to the caller of
1817 * this function.
1818 */
1819 skip += 3;
1820 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1821 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05001822}
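/*
 * Example use (illustrative, with a hypothetical caller): drop into
 * suspect code to record who reached it, without enabling a full
 * stack tracer:
 *
 *	void my_driver_fn(void)
 *	{
 *		trace_dump_stack(0);	(0 = skip no extra frames)
 *	}
 *
 * The backtrace lands in the ring buffer as a TRACE_STACK entry
 * rather than in the printk log.
 */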
1823
Steven Rostedt91e86e52010-11-10 12:56:12 +01001824static DEFINE_PER_CPU(int, user_stack_count);
1825
Steven Rostedte77405a2009-09-02 14:17:06 -04001826void
1827ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02001828{
Tom Zanussie1112b42009-03-31 00:48:49 -05001829 struct ftrace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02001830 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02001831 struct userstack_entry *entry;
1832 struct stack_trace trace;
Török Edwin02b67512008-11-22 13:28:47 +02001833
1834 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1835 return;
1836
Steven Rostedtb6345872010-03-12 20:03:30 -05001837 /*
1838 * NMIs cannot handle page faults, even with fixups.
1839 * Saving the user stack can (and often does) fault.
1840 */
1841 if (unlikely(in_nmi()))
1842 return;
1843
Steven Rostedt91e86e52010-11-10 12:56:12 +01001844 /*
1845 * prevent recursion, since the user stack tracing may
1846 * trigger other kernel events.
1847 */
1848 preempt_disable();
1849 if (__this_cpu_read(user_stack_count))
1850 goto out;
1851
1852 __this_cpu_inc(user_stack_count);
1853
Steven Rostedte77405a2009-09-02 14:17:06 -04001854 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001855 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02001856 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08001857 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02001858 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02001859
Steven Rostedt48659d32009-09-11 11:36:23 -04001860 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02001861 memset(&entry->caller, 0, sizeof(entry->caller));
1862
1863 trace.nr_entries = 0;
1864 trace.max_entries = FTRACE_STACK_ENTRIES;
1865 trace.skip = 0;
1866 trace.entries = entry->caller;
1867
1868 save_stack_trace_user(&trace);
Tom Zanussif306cc82013-10-24 08:34:17 -05001869 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001870 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001871
Li Zefan1dbd1952010-12-09 15:47:56 +08001872 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01001873 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001874 out:
1875 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02001876}
1877
Hannes Eder4fd27352009-02-10 19:44:12 +01001878#ifdef UNUSED
1879static void __trace_userstack(struct trace_array *tr, unsigned long flags)
Török Edwin02b67512008-11-22 13:28:47 +02001880{
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001881 ftrace_trace_userstack(tr, flags, preempt_count());
Török Edwin02b67512008-11-22 13:28:47 +02001882}
Hannes Eder4fd27352009-02-10 19:44:12 +01001883#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02001884
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001885#endif /* CONFIG_STACKTRACE */
1886
Steven Rostedt07d777f2011-09-22 14:01:55 -04001887/* created for use with alloc_percpu */
1888struct trace_buffer_struct {
1889 char buffer[TRACE_BUF_SIZE];
1890};
1891
1892static struct trace_buffer_struct *trace_percpu_buffer;
1893static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1894static struct trace_buffer_struct *trace_percpu_irq_buffer;
1895static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1896
1897/*
1898 * The buffer used is dependent on the context. There is a per cpu
1899 * buffer for normal context, softirq context, hard irq context and
1900 * for NMI context. This allows for lockless recording.
1901 *
1902 * Note, if the buffers failed to be allocated, then this returns NULL
1903 */
1904static char *get_trace_buf(void)
1905{
1906 struct trace_buffer_struct *percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04001907
1908 /*
1909 * If we have allocated per cpu buffers, then we do not
1910 * need to do any locking.
1911 */
1912 if (in_nmi())
1913 percpu_buffer = trace_percpu_nmi_buffer;
1914 else if (in_irq())
1915 percpu_buffer = trace_percpu_irq_buffer;
1916 else if (in_softirq())
1917 percpu_buffer = trace_percpu_sirq_buffer;
1918 else
1919 percpu_buffer = trace_percpu_buffer;
1920
1921 if (!percpu_buffer)
1922 return NULL;
1923
Shan Weid8a03492012-11-13 09:53:04 +08001924 return this_cpu_ptr(&percpu_buffer->buffer[0]);
Steven Rostedt07d777f2011-09-22 14:01:55 -04001925}
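/*
 * Illustrative walk of the context split above: a trace_printk() from
 * process context formats into trace_percpu_buffer; if a softirq,
 * hard irq or NMI arrives mid-format, it picks its own per-cpu buffer
 * (sirq/irq/nmi respectively), so no context can corrupt another's
 * half-written string and no locking is required.
 */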
1926
1927static int alloc_percpu_trace_buffer(void)
1928{
1929 struct trace_buffer_struct *buffers;
1930 struct trace_buffer_struct *sirq_buffers;
1931 struct trace_buffer_struct *irq_buffers;
1932 struct trace_buffer_struct *nmi_buffers;
1933
1934 buffers = alloc_percpu(struct trace_buffer_struct);
1935 if (!buffers)
1936 goto err_warn;
1937
1938 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1939 if (!sirq_buffers)
1940 goto err_sirq;
1941
1942 irq_buffers = alloc_percpu(struct trace_buffer_struct);
1943 if (!irq_buffers)
1944 goto err_irq;
1945
1946 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
1947 if (!nmi_buffers)
1948 goto err_nmi;
1949
1950 trace_percpu_buffer = buffers;
1951 trace_percpu_sirq_buffer = sirq_buffers;
1952 trace_percpu_irq_buffer = irq_buffers;
1953 trace_percpu_nmi_buffer = nmi_buffers;
1954
1955 return 0;
1956
1957 err_nmi:
1958 free_percpu(irq_buffers);
1959 err_irq:
1960 free_percpu(sirq_buffers);
1961 err_sirq:
1962 free_percpu(buffers);
1963 err_warn:
1964 WARN(1, "Could not allocate percpu trace_printk buffer");
1965 return -ENOMEM;
1966}
1967
Steven Rostedt81698832012-10-11 10:15:05 -04001968static int buffers_allocated;
1969
Steven Rostedt07d777f2011-09-22 14:01:55 -04001970void trace_printk_init_buffers(void)
1971{
Steven Rostedt07d777f2011-09-22 14:01:55 -04001972 if (buffers_allocated)
1973 return;
1974
1975 if (alloc_percpu_trace_buffer())
1976 return;
1977
Steven Rostedt2184db42014-05-28 13:14:40 -04001978 /* trace_printk() is for debug use only. Don't use it in production. */
1979
1980 pr_warning("\n**********************************************************\n");
1981 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
1982 pr_warning("** **\n");
1983 pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
1984 pr_warning("** **\n");
1985 pr_warning("** This means that this is a DEBUG kernel and it is **\n");
1986 pr_warning("** unsafe for produciton use. **\n");
1987 pr_warning("** **\n");
1988 pr_warning("** If you see this message and you are not debugging **\n");
1989 pr_warning("** the kernel, report this immediately to your vendor! **\n");
1990 pr_warning("** **\n");
1991 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
1992 pr_warning("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04001993
Steven Rostedtb382ede62012-10-10 21:44:34 -04001994 /* Expand the buffers to set size */
1995 tracing_update_buffers();
1996
Steven Rostedt07d777f2011-09-22 14:01:55 -04001997 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04001998
1999 /*
2000 * trace_printk_init_buffers() can be called by modules.
2001 * If that happens, then we need to start cmdline recording
2002 * directly here. If the global_trace.buffer is already
2003 * allocated here, then this was called by module code.
2004 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002005 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04002006 tracing_start_cmdline_record();
2007}
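/*
 * Minimal trace_printk() sketch (debug use only, per the banner
 * above):
 *
 *	trace_printk("entered %s with val=%d\n", __func__, val);
 *
 * With a compile-time-constant format this takes the binary
 * trace_bprintk()/trace_vbprintk() path below, recording just the
 * format pointer and raw arguments and deferring the expensive
 * formatting to read time.
 */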
2008
2009void trace_printk_start_comm(void)
2010{
2011 /* Start tracing comms if trace printk is set */
2012 if (!buffers_allocated)
2013 return;
2014 tracing_start_cmdline_record();
2015}
2016
2017static void trace_printk_start_stop_comm(int enabled)
2018{
2019 if (!buffers_allocated)
2020 return;
2021
2022 if (enabled)
2023 tracing_start_cmdline_record();
2024 else
2025 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002026}
2027
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002028/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002029 * trace_vbprintk - write a binary message to the tracing buffer
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002030 *
2031 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04002032int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002033{
Tom Zanussie1112b42009-03-31 00:48:49 -05002034 struct ftrace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002035 struct ring_buffer_event *event;
Steven Rostedte77405a2009-09-02 14:17:06 -04002036 struct ring_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002037 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002038 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002039 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002040 char *tbuffer;
2041 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002042
2043 if (unlikely(tracing_selftest_running || tracing_disabled))
2044 return 0;
2045
2046 /* Don't pollute graph traces with trace_vprintk internals */
2047 pause_graph_tracing();
2048
2049 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04002050 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002051
Steven Rostedt07d777f2011-09-22 14:01:55 -04002052 tbuffer = get_trace_buf();
2053 if (!tbuffer) {
2054 len = 0;
2055 goto out;
2056 }
2057
2058 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2059
2060 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002061 goto out;
2062
Steven Rostedt07d777f2011-09-22 14:01:55 -04002063 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002064 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002065 buffer = tr->trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04002066 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2067 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002068 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002069 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002070 entry = ring_buffer_event_data(event);
2071 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002072 entry->fmt = fmt;
2073
Steven Rostedt07d777f2011-09-22 14:01:55 -04002074 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05002075 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002076 __buffer_unlock_commit(buffer, event);
Steven Rostedtd9313692010-01-06 17:27:11 -05002077 ftrace_trace_stack(buffer, flags, 6, pc);
2078 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002079
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002080out:
Steven Rostedt5168ae52010-06-03 09:36:50 -04002081 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002082 unpause_graph_tracing();
2083
2084 return len;
2085}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002086EXPORT_SYMBOL_GPL(trace_vbprintk);
2087
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002088static int
2089__trace_array_vprintk(struct ring_buffer *buffer,
2090 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002091{
Tom Zanussie1112b42009-03-31 00:48:49 -05002092 struct ftrace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002093 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002094 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002095 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002096 unsigned long flags;
2097 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002098
2099 if (tracing_disabled || tracing_selftest_running)
2100 return 0;
2101
Steven Rostedt07d777f2011-09-22 14:01:55 -04002102 /* Don't pollute graph traces with trace_vprintk internals */
2103 pause_graph_tracing();
2104
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002105 pc = preempt_count();
2106 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002107
Steven Rostedt07d777f2011-09-22 14:01:55 -04002108
2109 tbuffer = get_trace_buf();
2110 if (!tbuffer) {
2111 len = 0;
2112 goto out;
2113 }
2114
2115 len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2116 if (len > TRACE_BUF_SIZE)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002117 goto out;
2118
Steven Rostedt07d777f2011-09-22 14:01:55 -04002119 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002120 size = sizeof(*entry) + len + 1;
Steven Rostedte77405a2009-09-02 14:17:06 -04002121 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
Steven Rostedt07d777f2011-09-22 14:01:55 -04002122 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002123 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002124 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002125 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002126 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002127
Steven Rostedt07d777f2011-09-22 14:01:55 -04002128 memcpy(&entry->buf, tbuffer, len);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002129 entry->buf[len] = '\0';
Tom Zanussif306cc82013-10-24 08:34:17 -05002130 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002131 __buffer_unlock_commit(buffer, event);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002132 ftrace_trace_stack(buffer, flags, 6, pc);
Steven Rostedtd9313692010-01-06 17:27:11 -05002133 }
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002134 out:
2135 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002136 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002137
2138 return len;
2139}
Steven Rostedt659372d2009-09-03 19:11:07 -04002140
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002141int trace_array_vprintk(struct trace_array *tr,
2142 unsigned long ip, const char *fmt, va_list args)
2143{
2144 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2145}
2146
2147int trace_array_printk(struct trace_array *tr,
2148 unsigned long ip, const char *fmt, ...)
2149{
2150 int ret;
2151 va_list ap;
2152
2153 if (!(trace_flags & TRACE_ITER_PRINTK))
2154 return 0;
2155
2156 va_start(ap, fmt);
2157 ret = trace_array_vprintk(tr, ip, fmt, ap);
2158 va_end(ap);
2159 return ret;
2160}
2161
2162int trace_array_printk_buf(struct ring_buffer *buffer,
2163 unsigned long ip, const char *fmt, ...)
2164{
2165 int ret;
2166 va_list ap;
2167
2168 if (!(trace_flags & TRACE_ITER_PRINTK))
2169 return 0;
2170
2171 va_start(ap, fmt);
2172 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2173 va_end(ap);
2174 return ret;
2175}
2176
Steven Rostedt659372d2009-09-03 19:11:07 -04002177int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2178{
Steven Rostedta813a152009-10-09 01:41:35 -04002179 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04002180}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002181EXPORT_SYMBOL_GPL(trace_vprintk);
2182
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002183static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04002184{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002185 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2186
Steven Rostedt5a90f572008-09-03 17:42:51 -04002187 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002188 if (buf_iter)
2189 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04002190}
2191
Ingo Molnare309b412008-05-12 21:20:51 +02002192static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002193peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2194 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002195{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002196 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002197 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002198
Steven Rostedtd7690412008-10-01 00:29:53 -04002199 if (buf_iter)
2200 event = ring_buffer_iter_peek(buf_iter, ts);
2201 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002202 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002203 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04002204
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002205 if (event) {
2206 iter->ent_size = ring_buffer_event_length(event);
2207 return ring_buffer_event_data(event);
2208 }
2209 iter->ent_size = 0;
2210 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002211}
Steven Rostedtd7690412008-10-01 00:29:53 -04002212
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002213static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002214__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2215 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002216{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002217 struct ring_buffer *buffer = iter->trace_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002218 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08002219 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002220 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002221 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002222 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002223 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002224 int cpu;
2225
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002226 /*
2227 * If we are in a per_cpu trace file, don't bother iterating over
2228 * all cpus, just peek directly.
2229 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002230 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002231 if (ring_buffer_empty_cpu(buffer, cpu_file))
2232 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002233 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002234 if (ent_cpu)
2235 *ent_cpu = cpu_file;
2236
2237 return ent;
2238 }
2239
Steven Rostedtab464282008-05-12 21:21:00 +02002240 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002241
2242 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002243 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002244
Steven Rostedtbc21b472010-03-31 19:49:26 -04002245 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002246
Ingo Molnarcdd31cd2008-05-12 21:20:46 +02002247 /*
2248 * Pick the entry with the smallest timestamp:
2249 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002250 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002251 next = ent;
2252 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002253 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002254 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002255 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002256 }
2257 }
2258
Steven Rostedt12b5da32012-03-27 10:43:28 -04002259 iter->ent_size = next_size;
2260
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002261 if (ent_cpu)
2262 *ent_cpu = next_cpu;
2263
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002264 if (ent_ts)
2265 *ent_ts = next_ts;
2266
Steven Rostedtbc21b472010-03-31 19:49:26 -04002267 if (missing_events)
2268 *missing_events = next_lost;
2269
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002270 return next;
2271}
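/*
 * Worked example of the merge above (illustrative numbers): with the
 * next per-cpu timestamps cpu0=105, cpu1=103, cpu2=110, the loop
 * returns the cpu1 entry with next_ts=103; repeated calls therefore
 * yield one stream ordered by timestamp across all CPUs.
 */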
2272
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002273/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002274struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2275 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002276{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002277 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002278}
Ingo Molnar8c523a92008-05-12 21:20:46 +02002279
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002280/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05002281void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002282{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002283 iter->ent = __find_next_entry(iter, &iter->cpu,
2284 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02002285
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002286 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002287 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002288
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002289 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02002290}
2291
Ingo Molnare309b412008-05-12 21:20:51 +02002292static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002293{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002294 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002295 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002296}
2297
Ingo Molnare309b412008-05-12 21:20:51 +02002298static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002299{
2300 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002301 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002302 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002303
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002304 WARN_ON_ONCE(iter->leftover);
2305
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002306 (*pos)++;
2307
2308 /* can't go backwards */
2309 if (iter->idx > i)
2310 return NULL;
2311
2312 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05002313 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002314 else
2315 ent = iter;
2316
2317 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05002318 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002319
2320 iter->pos = *pos;
2321
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002322 return ent;
2323}
2324
Jason Wessel955b61e2010-08-05 09:22:23 -05002325void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002326{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002327 struct ring_buffer_event *event;
2328 struct ring_buffer_iter *buf_iter;
2329 unsigned long entries = 0;
2330 u64 ts;
2331
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002332 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002333
Steven Rostedt6d158a82012-06-27 20:46:14 -04002334 buf_iter = trace_buffer_iter(iter, cpu);
2335 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002336 return;
2337
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002338 ring_buffer_iter_reset(buf_iter);
2339
2340 /*
2341 * With the max latency tracers we could have the case that
2342 * a reset never took place on a cpu. This is evident from
2343 * the timestamp being before the start of the buffer.
2344 */
2345 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002346 if (ts >= iter->trace_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002347 break;
2348 entries++;
2349 ring_buffer_read(buf_iter, NULL);
2350 }
2351
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002352 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002353}
2354
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002355/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002356 * The current tracer is copied to avoid taking a global lock
2357 * all around.
2358 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002359static void *s_start(struct seq_file *m, loff_t *pos)
2360{
2361 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002362 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002363 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002364 void *p = NULL;
2365 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002366 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002367
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09002368 /*
2369 * Copy the tracer to avoid taking a global lock all around.
2370 * iter->trace is a copy of current_trace; the pointer to the
2371 * name may be used instead of a strcmp(), as iter->trace->name
2372 * will point to the same string as current_trace->name.
2373 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002374 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002375 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2376 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002377 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002378
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002379#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002380 if (iter->snapshot && iter->trace->use_max_tr)
2381 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002382#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002383
2384 if (!iter->snapshot)
2385 atomic_inc(&trace_record_cmdline_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002386
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002387 if (*pos != iter->pos) {
2388 iter->ent = NULL;
2389 iter->cpu = 0;
2390 iter->idx = -1;
2391
Steven Rostedtae3b5092013-01-23 15:22:59 -05002392 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002393 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002394 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002395 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002396 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002397
Lai Jiangshanac91d852010-03-02 17:54:50 +08002398 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002399 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2400 ;
2401
2402 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002403 /*
2404 * If we overflowed the seq_file before, then we want
2405 * to just reuse the trace_seq buffer again.
2406 */
2407 if (iter->leftover)
2408 p = iter;
2409 else {
2410 l = *pos - 1;
2411 p = s_next(m, p, &l);
2412 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002413 }
2414
Lai Jiangshan4f535962009-05-18 19:35:34 +08002415 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002416 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002417 return p;
2418}
2419
2420static void s_stop(struct seq_file *m, void *p)
2421{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002422 struct trace_iterator *iter = m->private;
2423
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002424#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002425 if (iter->snapshot && iter->trace->use_max_tr)
2426 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002427#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002428
2429 if (!iter->snapshot)
2430 atomic_dec(&trace_record_cmdline_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002431
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002432 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08002433 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002434}
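/*
 * Sketch of the seq_file contract that s_start/s_next/s_stop above
 * implement (simplified, illustrative):
 *
 *	p = s_start(m, &pos);		(lock, resync iterator to *pos)
 *	while (p) {
 *		show(m, p);		(s_show, later in this file)
 *		p = s_next(m, p, &pos);
 *	}
 *	s_stop(m, p);			(unlock, resume cmdline recording)
 *
 * s_start() must cope with resuming at an arbitrary pos, which is why
 * it either replays entries via s_next() or reuses the leftover entry
 * from an overflowed seq_file buffer.
 */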
2435
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002436static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002437get_total_entries(struct trace_buffer *buf,
2438 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002439{
2440 unsigned long count;
2441 int cpu;
2442
2443 *total = 0;
2444 *entries = 0;
2445
2446 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002447 count = ring_buffer_entries_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002448 /*
2449 * If this buffer has skipped entries, then we hold all
2450 * entries for the trace and we need to ignore the
2451 * ones before the time stamp.
2452 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002453 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2454 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002455 /* total is the same as the entries */
2456 *total += count;
2457 } else
2458 *total += count +
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002459 ring_buffer_overrun_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002460 *entries += count;
2461 }
2462}
2463
Ingo Molnare309b412008-05-12 21:20:51 +02002464static void print_lat_help_header(struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002465{
Michael Ellermana6168352008-08-20 16:36:11 -07002466 seq_puts(m, "#                  _------=> CPU#            \n");
2467 seq_puts(m, "#                 / _-----=> irqs-off        \n");
2468 seq_puts(m, "#                | / _----=> need-resched    \n");
2469 seq_puts(m, "#                || / _---=> hardirq/softirq \n");
2470 seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
Steven Rostedte6e1e252011-03-09 10:41:56 -05002471 seq_puts(m, "#                |||| /     delay             \n");
2472 seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
2473 seq_puts(m, "#     \\   /      |||||  \\    |   /           \n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002474}
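/*
 * Sample line under the header above, with illustrative values:
 *
 *	bash-2277    0d..4    5us : pick_next_task <-schedule
 *
 * Reading "0d..4": CPU 0, 'd' irqs disabled, '.' no resched pending,
 * '.' not in irq context, preempt-depth 4.
 */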
2475
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002476static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002477{
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002478 unsigned long total;
2479 unsigned long entries;
2480
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002481 get_total_entries(buf, &total, &entries);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002482 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2483 entries, total, num_online_cpus());
2484 seq_puts(m, "#\n");
2485}
2486
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002487static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002488{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002489 print_event_info(buf, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002490 seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n");
Michael Ellermana6168352008-08-20 16:36:11 -07002491 seq_puts(m, "#              | |       |          |         |\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002492}
2493
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002494static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt77271ce2011-11-17 09:34:33 -05002495{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002496 print_event_info(buf, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002497 seq_puts(m, "#                              _-----=> irqs-off\n");
2498 seq_puts(m, "#                             / _----=> need-resched\n");
2499 seq_puts(m, "#                            | / _---=> hardirq/softirq\n");
2500 seq_puts(m, "#                            || / _--=> preempt-depth\n");
2501 seq_puts(m, "#                            ||| /     delay\n");
2502 seq_puts(m, "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
2503 seq_puts(m, "#              | |       |   ||||       |         |\n");
2504}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002505
void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_buffer *buf = iter->trace_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
	struct tracer *type = iter->trace;
	unsigned long entries;
	unsigned long total;
	const char *name = "preemption";

	name = type->name;

	get_total_entries(buf, &total, &entries);

	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "# -----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   buf->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "#    -----------------\n");
	seq_printf(m, "#    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid,
		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "#    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, "#  => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#  => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#\n");
	}

	seq_puts(m, "#\n");
}

static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;

	if (!(trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	if (cpumask_test_cpu(iter->cpu, iter->started))
		return;

	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
		return;

	cpumask_set_cpu(iter->cpu, iter->started);

	/* Don't print started cpu buffer for the first entry of the trace */
	if (iter->idx > 1)
		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
				 iter->cpu);
}

static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	test_cpu_buff_start(iter);

	event = ftrace_find_event(entry->type);

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
			if (!trace_print_lat_context(iter))
				goto partial;
		} else {
			if (!trace_print_context(iter))
				goto partial;
		}
	}

	if (event)
		return event->funcs->trace(iter, sym_flags, event);

	if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (!trace_seq_printf(s, "%d %d %llu ",
				      entry->pid, iter->cpu, iter->ts))
			goto partial;
	}

	event = ftrace_find_event(entry->type);
	if (event)
		return event->funcs->raw(iter, 0, event);

	if (!trace_seq_printf(s, "%d ?\n", entry->type))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
		SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
		SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
	}

	event = ftrace_find_event(entry->type);
	if (event) {
		enum print_line_t ret = event->funcs->hex(iter, 0, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	SEQ_PUT_FIELD_RET(s, newline);

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_FIELD_RET(s, entry->pid);
		SEQ_PUT_FIELD_RET(s, iter->cpu);
		SEQ_PUT_FIELD_RET(s, iter->ts);
	}

	event = ftrace_find_event(entry->type);
	return event ? event->funcs->binary(iter, 0, event) :
		TRACE_TYPE_HANDLED;
}

int trace_empty(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter;
	int cpu;

	/* If we are looking at one CPU buffer, only check that one */
	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
		cpu = iter->cpu_file;
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
		return 1;
	}

	for_each_tracing_cpu(cpu) {
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
	}

	return 1;
}

/* Called with trace_event_read_lock() held. */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	enum print_line_t ret;

	if (iter->lost_events &&
	    !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
			      iter->cpu, iter->lost_events))
		return TRACE_TYPE_PARTIAL_LINE;

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_BPUTS &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bputs_msg_only(iter);

	if (iter->ent->type == TRACE_BPRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}

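/*
 * Note the precedence encoded above: the lost-events annotation comes
 * first, then a tracer's own print_line() hook, then the three
 * printk-msg-only special cases, then the user-selected encodings in
 * the fixed order bin > hex > raw, and only then the default
 * formatted output.
 */
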
void trace_latency_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;

	/* print nothing if the buffers are empty */
	if (trace_empty(iter))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
		print_trace_header(m, iter);

	if (!(trace_flags & TRACE_ITER_VERBOSE))
		print_lat_help_header(m);
}

void trace_default_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;
		print_trace_header(m, iter);
		if (!(trace_flags & TRACE_ITER_VERBOSE))
			print_lat_help_header(m);
	} else {
		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
			if (trace_flags & TRACE_ITER_IRQ_INFO)
				print_func_help_header_irq(iter->trace_buffer, m);
			else
				print_func_help_header(iter->trace_buffer, m);
		}
	}
}

static void test_ftrace_alive(struct seq_file *m)
{
	if (!ftrace_is_dead())
		return;
	seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
	seq_printf(m, "#          MAY BE MISSING FUNCTION EVENTS\n");
}

#ifdef CONFIG_TRACER_MAX_TRACE
static void show_snapshot_main_help(struct seq_file *m)
{
	seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
	seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
	seq_printf(m, "#                      Takes a snapshot of the main buffer.\n");
	seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n");
	seq_printf(m, "#                      (Doesn't have to be '2'; works with any number that\n");
	seq_printf(m, "#                       is not a '0' or '1')\n");
}

static void show_snapshot_percpu_help(struct seq_file *m)
{
	seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
	seq_printf(m, "#                      Takes a snapshot of the main buffer for this cpu.\n");
#else
	seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
	seq_printf(m, "#                     Must use main snapshot file to allocate.\n");
#endif
	seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
	seq_printf(m, "#                      (Doesn't have to be '2'; works with any number that\n");
	seq_printf(m, "#                       is not a '0' or '1')\n");
}

static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
{
	if (iter->tr->allocated_snapshot)
		seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
	else
		seq_printf(m, "#\n# * Snapshot is freed *\n#\n");

	seq_printf(m, "# Snapshot commands:\n");
	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
		show_snapshot_main_help(m);
	else
		show_snapshot_percpu_help(m);
}
#else
/* Should never be called */
static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
#endif

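/*
 * A typical round trip with the main snapshot file, per the help text
 * above (path assumes debugfs is mounted at /sys/kernel/debug):
 *
 *     echo 1 > /sys/kernel/debug/tracing/snapshot   # allocate + snapshot
 *     cat /sys/kernel/debug/tracing/snapshot        # read it back
 *     echo 0 > /sys/kernel/debug/tracing/snapshot   # free the buffer
 */
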
static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
			test_ftrace_alive(m);
		}
		if (iter->snapshot && trace_empty(iter))
			print_snapshot_help(m, iter);
		else if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else
			trace_default_header(m);

	} else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;

	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * Use that instead.
		 * ret is 0 if seq_file write succeeded.
		 * -1 otherwise.
		 */
		iter->leftover = ret;
	}

	return 0;
}

/*
 * Should be used after trace_array_get(); trace_types_lock
 * ensures that i_cdev was already initialized.
 */
static inline int tracing_get_cpu(struct inode *inode)
{
	if (inode->i_cdev) /* See trace_create_cpu_file() */
		return (long)inode->i_cdev - 1;
	return RING_BUFFER_ALL_CPUS;
}

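/*
 * The -1 above undoes the +1 bias that trace_create_cpu_file() is
 * expected to apply when stashing a cpu number in i_cdev, leaving a
 * NULL i_cdev (0) free to mean "no specific CPU".  A per_cpu/cpu2
 * file would then carry i_cdev == (void *)3 and decode to cpu 2 here.
 * (The encoding side lives elsewhere; this note is inferred from the
 * decode above.)
 */
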
static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};

static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int cpu;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
	if (!iter)
		return ERR_PTR(-ENOMEM);

	iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
				    GFP_KERNEL);
	if (!iter->buffer_iter)
		goto release;

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	*iter->trace = *tr->current_trace;

	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
		goto fail;

	iter->tr = tr;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Currently only the top directory has a snapshot */
	if (tr->current_trace->print_max || snapshot)
		iter->trace_buffer = &tr->max_buffer;
	else
#endif
		iter->trace_buffer = &tr->trace_buffer;
	iter->snapshot = snapshot;
	iter->pos = -1;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* stop the trace while dumping if we are not opening "snapshot" */
	if (!iter->snapshot)
		tracing_stop_tr(tr);

	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter->buffer_iter[cpu] =
				ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		}
		ring_buffer_read_prepare_sync();
		for_each_tracing_cpu(cpu) {
			ring_buffer_read_start(iter->buffer_iter[cpu]);
			tracing_iter_reset(iter, cpu);
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		ring_buffer_read_prepare_sync();
		ring_buffer_read_start(iter->buffer_iter[cpu]);
		tracing_iter_reset(iter, cpu);
	}

	mutex_unlock(&trace_types_lock);

	return iter;

 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
release:
	seq_release_private(inode, file);
	return ERR_PTR(-ENOMEM);
}

int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}

bool tracing_is_disabled(void)
{
	return (tracing_disabled) ? true : false;
}

/*
 * Open and update trace_array ref count.
 * Must have the current trace_array passed to it.
 */
static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	filp->private_data = inode->i_private;

	return 0;
}

static int tracing_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m = file->private_data;
	struct trace_iterator *iter;
	int cpu;

	if (!(file->f_mode & FMODE_READ)) {
		trace_array_put(tr);
		return 0;
	}

	/* Writes do not use seq_file */
	iter = m->private;
	mutex_lock(&trace_types_lock);

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	if (!iter->snapshot)
		/* reenable tracing if it was previously enabled */
		tracing_start_tr(tr);

	__trace_array_put(tr);

	mutex_unlock(&trace_types_lock);

	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
	seq_release_private(inode, file);

	return 0;
}

static int tracing_release_generic_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);
	return 0;
}

static int tracing_single_release_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return single_release(inode, file);
}

static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* If this file was open for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		int cpu = tracing_get_cpu(inode);

		if (cpu == RING_BUFFER_ALL_CPUS)
			tracing_reset_online_cpus(&tr->trace_buffer);
		else
			tracing_reset(&tr->trace_buffer, cpu);
	}

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, false);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}

	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

/*
 * Some tracers are not suitable for instance buffers.
 * A tracer is always available for the global array (toplevel)
 * or if it explicitly states that it is.
 */
static bool
trace_ok_for_array(struct tracer *t, struct trace_array *tr)
{
	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
}

/* Find the next tracer that this trace array may use */
static struct tracer *
get_tracer_for_array(struct trace_array *tr, struct tracer *t)
{
	while (t && !trace_ok_for_array(t, tr))
		t = t->next;

	return t;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t = v;

	(*pos)++;

	if (t)
		t = get_tracer_for_array(tr, t->next);

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);

	t = get_tracer_for_array(tr, trace_types);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_printf(m, "%s", t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}

static const struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int show_traces_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	ret = seq_open(file, &show_traces_seq_ops);
	if (ret)
		return ret;

	m = file->private_data;
	m->private = tr;

	return 0;
}

static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}

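/*
 * Writes to the "trace" file are accepted but discarded by the stub
 * above; what actually clears the buffer is opening the file with
 * O_TRUNC, handled in tracing_open() above.  So "echo > trace"
 * empties the buffer even though the written bytes are ignored.
 */
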
loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
{
	int ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, whence);
	else
		file->f_pos = ret = 0;

	return ret;
}

static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.write		= tracing_write_stub,
	.llseek		= tracing_lseek,
	.release	= tracing_release,
};

static const struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
	.llseek		= seq_lseek,
};

/*
 * The tracer itself will not take this lock, but still we want
 * to provide a consistent cpumask to user-space:
 */
static DEFINE_MUTEX(tracing_cpumask_update_lock);

/*
 * Temporary storage for the character representation of the
 * CPU bitmask (and one more byte for the newline):
 */
static char mask_str[NR_CPUS + 1];

static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	int len;

	mutex_lock(&tracing_cpumask_update_lock);

	len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
	if (count - len < 2) {
		count = -EINVAL;
		goto out_err;
	}
	len += sprintf(mask_str + len, "\n");
	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);

out_err:
	mutex_unlock(&tracing_cpumask_update_lock);

	return count;
}

static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	cpumask_var_t tracing_cpumask_new;
	int err, cpu;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	mutex_lock(&tracing_cpumask_update_lock);

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
		    !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
		    cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
		}
	}
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);

	mutex_unlock(&tracing_cpumask_update_lock);
	free_cpumask_var(tracing_cpumask_new);

	return count;

err_unlock:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}

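/*
 * tracing_cpumask is read and written as a hex cpumask (see
 * cpumask_parse_user() above), so on a 4-CPU machine
 *
 *     echo 3 > /sys/kernel/debug/tracing/tracing_cpumask
 *
 * restricts tracing to CPUs 0 and 1, with the disabled counters and
 * per-cpu ring buffers of the remaining CPUs adjusted in the loop
 * above.  (Path assumes debugfs mounted at /sys/kernel/debug.)
 */
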
static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
	.release	= tracing_release_generic_tr,
	.llseek		= generic_file_llseek,
};

static int tracing_trace_options_show(struct seq_file *m, void *v)
{
	struct tracer_opt *trace_opts;
	struct trace_array *tr = m->private;
	u32 tracer_flags;
	int i;

	mutex_lock(&trace_types_lock);
	tracer_flags = tr->current_trace->flags->val;
	trace_opts = tr->current_trace->flags->opts;

	for (i = 0; trace_options[i]; i++) {
		if (trace_flags & (1 << i))
			seq_printf(m, "%s\n", trace_options[i]);
		else
			seq_printf(m, "no%s\n", trace_options[i]);
	}

	for (i = 0; trace_opts[i].name; i++) {
		if (tracer_flags & trace_opts[i].bit)
			seq_printf(m, "%s\n", trace_opts[i].name);
		else
			seq_printf(m, "no%s\n", trace_opts[i].name);
	}
	mutex_unlock(&trace_types_lock);

	return 0;
}

static int __set_tracer_option(struct trace_array *tr,
			       struct tracer_flags *tracer_flags,
			       struct tracer_opt *opts, int neg)
{
	struct tracer *trace = tr->current_trace;
	int ret;

	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
	if (ret)
		return ret;

	if (neg)
		tracer_flags->val &= ~opts->bit;
	else
		tracer_flags->val |= opts->bit;
	return 0;
}

/* Try to assign a tracer specific option */
static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
{
	struct tracer *trace = tr->current_trace;
	struct tracer_flags *tracer_flags = trace->flags;
	struct tracer_opt *opts = NULL;
	int i;

	for (i = 0; tracer_flags->opts[i].name; i++) {
		opts = &tracer_flags->opts[i];

		if (strcmp(cmp, opts->name) == 0)
			return __set_tracer_option(tr, trace->flags, opts, neg);
	}

	return -EINVAL;
}

/* Some tracers require overwrite to stay enabled */
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
{
	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
		return -1;

	return 0;
}

int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
	/* do nothing if flag is already set */
	if (!!(trace_flags & mask) == !!enabled)
		return 0;

	/* Give the tracer a chance to approve the change */
	if (tr->current_trace->flag_changed)
		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
			return -EINVAL;

	if (enabled)
		trace_flags |= mask;
	else
		trace_flags &= ~mask;

	if (mask == TRACE_ITER_RECORD_CMD)
		trace_event_enable_cmd_record(enabled);

	if (mask == TRACE_ITER_OVERWRITE) {
		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
#ifdef CONFIG_TRACER_MAX_TRACE
		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
#endif
	}

	if (mask == TRACE_ITER_PRINTK)
		trace_printk_start_stop_comm(enabled);

	return 0;
}

static int trace_set_options(struct trace_array *tr, char *option)
{
	char *cmp;
	int neg = 0;
	int ret = -ENODEV;
	int i;

	cmp = strstrip(option);

	if (strncmp(cmp, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	mutex_lock(&trace_types_lock);

	for (i = 0; trace_options[i]; i++) {
		if (strcmp(cmp, trace_options[i]) == 0) {
			ret = set_tracer_flag(tr, 1 << i, !neg);
			break;
		}
	}

	/* If no option could be set, test the specific tracer options */
	if (!trace_options[i])
		ret = set_tracer_option(tr, cmp, neg);

	mutex_unlock(&trace_types_lock);

	return ret;
}

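/*
 * Example inputs this parser handles when written to the trace_options
 * file: "print-parent" sets a core flag, "noprint-parent" clears it
 * via the "no" prefix check above, and a string not found in
 * trace_options[] (e.g. "nosleep-time" while function_graph is the
 * current tracer) falls through to set_tracer_option() for
 * tracer-specific flags.
 */
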
static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
			    size_t cnt, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = trace_set_options(tr, buf);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int tracing_trace_options_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	ret = single_open(file, tracing_trace_options_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static const struct file_operations tracing_iter_fops = {
	.open		= tracing_trace_options_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_trace_options_write,
};

Ingo Molnar7bd2f242008-05-12 21:20:45 +02003561static const char readme_msg[] =
3562 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003563 "# echo 0 > tracing_on : quick way to disable tracing\n"
3564 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3565 " Important files:\n"
3566 " trace\t\t\t- The static contents of the buffer\n"
3567 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3568 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3569 " current_tracer\t- function and latency tracers\n"
3570 " available_tracers\t- list of configured tracers for current_tracer\n"
3571 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3572 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3573 " trace_clock\t\t-change the clock used to order events\n"
3574 " local: Per cpu clock but may not be synced across CPUs\n"
3575 " global: Synced across CPUs but slows tracing down.\n"
3576 " counter: Not a clock, but just an increment\n"
3577 " uptime: Jiffy counter from time of boot\n"
3578 " perf: Same clock that perf events use\n"
3579#ifdef CONFIG_X86_64
3580 " x86-tsc: TSC cycle counter\n"
3581#endif
3582 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3583 " tracing_cpumask\t- Limit which CPUs to trace\n"
3584 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3585 "\t\t\t Remove sub-buffer with rmdir\n"
3586 " trace_options\t\t- Set format or modify how tracing happens\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003587 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
3588 "\t\t\t option name\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003589#ifdef CONFIG_DYNAMIC_FTRACE
3590 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003591 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3592 "\t\t\t functions\n"
3593 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3594 "\t modules: Can select a group via module\n"
3595 "\t Format: :mod:<module-name>\n"
3596 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3597 "\t triggers: a command to perform when function is hit\n"
3598 "\t Format: <function>:<trigger>[:count]\n"
3599 "\t trigger: traceon, traceoff\n"
3600 "\t\t enable_event:<system>:<event>\n"
3601 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003602#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003603 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003604#endif
3605#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003606 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003607#endif
Steven Rostedt (Red Hat)17a280e2014-04-10 22:43:37 -04003608 "\t\t dump\n"
3609 "\t\t cpudump\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003610 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3611 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3612 "\t The first one will disable tracing every time do_fault is hit\n"
3613 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3614 "\t The first time do trap is hit and it disables tracing, the\n"
3615 "\t counter will decrement to 2. If tracing is already disabled,\n"
3616 "\t the counter will not decrement. It only decrements when the\n"
3617 "\t trigger did work\n"
3618 "\t To remove trigger without count:\n"
3619 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3620 "\t To remove trigger with a count:\n"
3621 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003622 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003623 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3624 "\t modules: Can select a group via module command :mod:\n"
3625 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003626#endif /* CONFIG_DYNAMIC_FTRACE */
3627#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003628 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3629 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003630#endif
3631#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3632 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3633 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3634#endif
3635#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003636 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3637 "\t\t\t snapshot buffer. Read the contents for more\n"
3638 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003639#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003640#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003641 " stack_trace\t\t- Shows the max stack trace when active\n"
3642 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003643 "\t\t\t Write into this file to reset the max size (trigger a\n"
3644 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003645#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003646 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3647 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003648#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003649#endif /* CONFIG_STACK_TRACER */
Tom Zanussi26f25562014-01-17 15:11:44 -06003650 " events/\t\t- Directory containing all trace event subsystems:\n"
3651 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3652 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003653 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3654 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003655 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003656 " events/<system>/<event>/\t- Directory containing control files for\n"
3657 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003658 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3659 " filter\t\t- If set, only events passing filter are traced\n"
3660 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003661 "\t Format: <trigger>[:count][if <filter>]\n"
3662 "\t trigger: traceon, traceoff\n"
3663 "\t enable_event:<system>:<event>\n"
3664 "\t disable_event:<system>:<event>\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003665#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003666 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003667#endif
3668#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003669 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003670#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003671 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3672 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3673 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3674 "\t events/block/block_unplug/trigger\n"
3675 "\t The first disables tracing every time block_unplug is hit.\n"
3676 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3677 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3678	"\t   is hit and has a value greater than 1 for the 'nr_rq' event field.\n"
3679 "\t Like function triggers, the counter is only decremented if it\n"
3680 "\t enabled or disabled tracing.\n"
3681 "\t To remove a trigger without a count:\n"
3682	"\t   echo '!<trigger>' > <system>/<event>/trigger\n"
3683 "\t To remove a trigger with a count:\n"
3684	"\t   echo '!<trigger>:0' > <system>/<event>/trigger\n"
3685 "\t Filters can be ignored when removing a trigger.\n"
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003686;
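
/*
 * Illustrative sketch (not part of the original source): the trigger
 * syntax documented above can be driven from userspace with a plain
 * write. The tracefs mount point is assumed to be the usual
 * /sys/kernel/debug/tracing.
 *
 *	int fd = open("/sys/kernel/debug/tracing/set_ftrace_filter",
 *		      O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "do_trap:traceoff:3", 18);
 *		close(fd);
 *	}
 */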
3687
3688static ssize_t
3689tracing_readme_read(struct file *filp, char __user *ubuf,
3690 size_t cnt, loff_t *ppos)
3691{
3692 return simple_read_from_buffer(ubuf, cnt, ppos,
3693 readme_msg, strlen(readme_msg));
3694}
3695
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003696static const struct file_operations tracing_readme_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003697 .open = tracing_open_generic,
3698 .read = tracing_readme_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003699 .llseek = generic_file_llseek,
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003700};
3701
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003702static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
Avadh Patel69abe6a2009-04-10 16:04:48 -04003703{
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003704 unsigned int *ptr = v;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003705
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003706 if (*pos || m->count)
3707 ptr++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003708
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003709 (*pos)++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003710
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003711 for (; ptr < &map_cmdline_to_pid[SAVED_CMDLINES]; ptr++) {
3712 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
Avadh Patel69abe6a2009-04-10 16:04:48 -04003713 continue;
3714
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003715 return ptr;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003716 }
3717
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003718 return NULL;
3719}
Avadh Patel69abe6a2009-04-10 16:04:48 -04003720
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003721static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3722{
3723 void *v;
3724 loff_t l = 0;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003725
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003726 v = &map_cmdline_to_pid[0];
3727 while (l <= *pos) {
3728 v = saved_cmdlines_next(m, v, &l);
3729 if (!v)
3730 return NULL;
3731 }
3732
3733 return v;
3734}
3735
3736static void saved_cmdlines_stop(struct seq_file *m, void *v)
3737{
3738}
3739
3740static int saved_cmdlines_show(struct seq_file *m, void *v)
3741{
3742 char buf[TASK_COMM_LEN];
3743 unsigned int *pid = v;
3744
3745 trace_find_cmdline(*pid, buf);
3746 seq_printf(m, "%d %s\n", *pid, buf);
3747 return 0;
3748}
3749
3750static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3751 .start = saved_cmdlines_start,
3752 .next = saved_cmdlines_next,
3753 .stop = saved_cmdlines_stop,
3754 .show = saved_cmdlines_show,
3755};
3756
3757static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3758{
3759 if (tracing_disabled)
3760 return -ENODEV;
3761
3762 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
Avadh Patel69abe6a2009-04-10 16:04:48 -04003763}
3764
3765static const struct file_operations tracing_saved_cmdlines_fops = {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003766 .open = tracing_saved_cmdlines_open,
3767 .read = seq_read,
3768 .llseek = seq_lseek,
3769 .release = seq_release,
Avadh Patel69abe6a2009-04-10 16:04:48 -04003770};
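
/*
 * Output format note (hedged example; the format follows directly from
 * saved_cmdlines_show() above, which emits "%d %s\n"): reading
 * saved_cmdlines yields one "<pid> <comm>" pair per recorded task, e.g.
 *
 *	# cat saved_cmdlines
 *	1024 bash
 *	1057 cat
 */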
3771
3772static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003773tracing_set_trace_read(struct file *filp, char __user *ubuf,
3774 size_t cnt, loff_t *ppos)
3775{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003776 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08003777 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003778 int r;
3779
3780 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003781 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003782 mutex_unlock(&trace_types_lock);
3783
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003784 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003785}
3786
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02003787int tracer_init(struct tracer *t, struct trace_array *tr)
3788{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003789 tracing_reset_online_cpus(&tr->trace_buffer);
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02003790 return t->init(tr);
3791}
3792
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003793static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003794{
3795 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05003796
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003797 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003798 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003799}
3800
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003801#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookad60da502012-10-17 11:56:16 +09003802/* resize @trace_buf's buffer to the size of @size_buf's entries */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003803static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
3804 struct trace_buffer *size_buf, int cpu_id)
Hiraku Toyookad60da502012-10-17 11:56:16 +09003805{
3806 int cpu, ret = 0;
3807
3808 if (cpu_id == RING_BUFFER_ALL_CPUS) {
3809 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003810 ret = ring_buffer_resize(trace_buf->buffer,
3811 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
Hiraku Toyookad60da502012-10-17 11:56:16 +09003812 if (ret < 0)
3813 break;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003814 per_cpu_ptr(trace_buf->data, cpu)->entries =
3815 per_cpu_ptr(size_buf->data, cpu)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09003816 }
3817 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003818 ret = ring_buffer_resize(trace_buf->buffer,
3819 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
Hiraku Toyookad60da502012-10-17 11:56:16 +09003820 if (ret == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003821 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
3822 per_cpu_ptr(size_buf->data, cpu_id)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09003823 }
3824
3825 return ret;
3826}
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003827#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09003828
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003829static int __tracing_resize_ring_buffer(struct trace_array *tr,
3830 unsigned long size, int cpu)
Steven Rostedt73c51622009-03-11 13:42:01 -04003831{
3832 int ret;
3833
3834 /*
3835 * If kernel or user changes the size of the ring buffer
Steven Rostedta123c522009-03-12 11:21:08 -04003836 * we use the size that was given, and we can forget about
3837 * expanding it later.
Steven Rostedt73c51622009-03-11 13:42:01 -04003838 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05003839 ring_buffer_expanded = true;
Steven Rostedt73c51622009-03-11 13:42:01 -04003840
Steven Rostedtb382ede62012-10-10 21:44:34 -04003841 /* May be called before buffers are initialized */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003842 if (!tr->trace_buffer.buffer)
Steven Rostedtb382ede62012-10-10 21:44:34 -04003843 return 0;
3844
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003845 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003846 if (ret < 0)
3847 return ret;
3848
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003849#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003850 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
3851 !tr->current_trace->use_max_tr)
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003852 goto out;
3853
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003854 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003855 if (ret < 0) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003856 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
3857 &tr->trace_buffer, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003858 if (r < 0) {
Steven Rostedta123c522009-03-12 11:21:08 -04003859 /*
3860 * AARGH! We are left with different
3861 * size max buffer!!!!
3862 * The max buffer is our "snapshot" buffer.
3863 * When a tracer needs a snapshot (one of the
3864 * latency tracers), it swaps the max buffer
3865			 * with the saved snapshot. We succeeded in updating
3866			 * the size of the main buffer, but failed to
3867 * update the size of the max buffer. But when we tried
3868 * to reset the main buffer to the original size, we
3869 * failed there too. This is very unlikely to
3870 * happen, but if it does, warn and kill all
3871 * tracing.
3872 */
Steven Rostedt73c51622009-03-11 13:42:01 -04003873 WARN_ON(1);
3874 tracing_disabled = 1;
3875 }
3876 return ret;
3877 }
3878
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003879 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003880 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003881 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003882 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003883
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003884 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003885#endif /* CONFIG_TRACER_MAX_TRACE */
3886
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003887 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003888 set_buffer_entries(&tr->trace_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003889 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003890 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04003891
3892 return ret;
3893}
3894
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003895static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
3896 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003897{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07003898 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003899
3900 mutex_lock(&trace_types_lock);
3901
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003902 if (cpu_id != RING_BUFFER_ALL_CPUS) {
3903		/* make sure this cpu is enabled in the mask */
3904 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
3905 ret = -EINVAL;
3906 goto out;
3907 }
3908 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003909
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003910 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003911 if (ret < 0)
3912 ret = -ENOMEM;
3913
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003914out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003915 mutex_unlock(&trace_types_lock);
3916
3917 return ret;
3918}
3919
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003920
Steven Rostedt1852fcc2009-03-11 14:33:00 -04003921/**
3922 * tracing_update_buffers - used by tracing facility to expand ring buffers
3923 *
3924 * To save memory when tracing is never used on a system that has it
3925 * configured in, the ring buffers are set to a minimum size. But once
3926 * a user starts to use the tracing facility, they need to grow
3927 * to their default size.
3928 *
3929 * This function is to be called when a tracer is about to be used.
3930 */
3931int tracing_update_buffers(void)
3932{
3933 int ret = 0;
3934
Steven Rostedt1027fcb2009-03-12 11:33:20 -04003935 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04003936 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003937 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003938 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04003939 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04003940
3941 return ret;
3942}
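
/*
 * Illustrative call site (a sketch, not a quote from elsewhere in the
 * kernel): per the comment above, any path that is about to start
 * tracing should expand the boot-time minimal buffers first:
 *
 *	ret = tracing_update_buffers();
 *	if (ret < 0)
 *		return ret;
 */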
3943
Steven Rostedt577b7852009-02-26 23:43:05 -05003944struct trace_option_dentry;
3945
3946static struct trace_option_dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003947create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05003948
3949static void
3950destroy_trace_option_files(struct trace_option_dentry *topts);
3951
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05003952/*
3953 * Used to clear out the tracer before deletion of an instance.
3954 * Must have trace_types_lock held.
3955 */
3956static void tracing_set_nop(struct trace_array *tr)
3957{
3958 if (tr->current_trace == &nop_trace)
3959 return;
3960
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05003961 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05003962
3963 if (tr->current_trace->reset)
3964 tr->current_trace->reset(tr);
3965
3966 tr->current_trace = &nop_trace;
3967}
3968
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003969static int tracing_set_tracer(struct trace_array *tr, const char *buf)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003970{
Steven Rostedt577b7852009-02-26 23:43:05 -05003971 static struct trace_option_dentry *topts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003972 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003973#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05003974 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003975#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01003976 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003977
Steven Rostedt1027fcb2009-03-12 11:33:20 -04003978 mutex_lock(&trace_types_lock);
3979
Steven Rostedt73c51622009-03-11 13:42:01 -04003980 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003981 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003982 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04003983 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01003984 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04003985 ret = 0;
3986 }
3987
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003988 for (t = trace_types; t; t = t->next) {
3989 if (strcmp(t->name, buf) == 0)
3990 break;
3991 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02003992 if (!t) {
3993 ret = -EINVAL;
3994 goto out;
3995 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003996 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003997 goto out;
3998
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003999 /* Some tracers are only allowed for the top level buffer */
4000 if (!trace_ok_for_array(t, tr)) {
4001 ret = -EINVAL;
4002 goto out;
4003 }
4004
Steven Rostedt9f029e82008-11-12 15:24:24 -05004005 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004006
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004007 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004008
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004009 if (tr->current_trace->reset)
4010 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05004011
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004012 /* Current trace needs to be nop_trace before synchronize_sched */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004013 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05004014
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05004015#ifdef CONFIG_TRACER_MAX_TRACE
4016 had_max_tr = tr->allocated_snapshot;
Steven Rostedt34600f02013-01-22 13:35:11 -05004017
4018 if (had_max_tr && !t->use_max_tr) {
4019 /*
4020 * We need to make sure that the update_max_tr sees that
4021 * current_trace changed to nop_trace to keep it from
4022 * swapping the buffers after we resize it.
4023		 * The update_max_tr is called with interrupts disabled,
4024		 * so a synchronize_sched() is sufficient.
4025 */
4026 synchronize_sched();
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004027 free_snapshot(tr);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004028 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004029#endif
Steven Rostedt (Red Hat)f1b21c92014-01-14 12:33:33 -05004030 /* Currently, only the top instance has options */
4031 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
4032 destroy_trace_option_files(topts);
4033 topts = create_trace_option_files(tr, t);
4034 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004035
4036#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05004037 if (t->use_max_tr && !had_max_tr) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004038 ret = alloc_snapshot(tr);
Hiraku Toyookad60da502012-10-17 11:56:16 +09004039 if (ret < 0)
4040 goto out;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004041 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004042#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05004043
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004044 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004045 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004046 if (ret)
4047 goto out;
4048 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004049
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004050 tr->current_trace = t;
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004051 tr->current_trace->enabled++;
Steven Rostedt9f029e82008-11-12 15:24:24 -05004052 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004053 out:
4054 mutex_unlock(&trace_types_lock);
4055
Peter Zijlstrad9e54072008-11-01 19:57:37 +01004056 return ret;
4057}
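
/*
 * Userspace view (hedged; the file name is the conventional
 * "current_tracer"): tracing_set_tracer() is what ultimately runs when
 * a tracer name is written there, e.g.
 *
 *	echo function_graph > current_tracer
 */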
4058
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004059static ssize_t
4060tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4061 size_t cnt, loff_t *ppos)
4062{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004063 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08004064 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004065 int i;
4066 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004067 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004068
Steven Rostedt60063a62008-10-28 10:44:24 -04004069 ret = cnt;
4070
Li Zefanee6c2c12009-09-18 14:06:47 +08004071 if (cnt > MAX_TRACER_SIZE)
4072 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004073
4074 if (copy_from_user(&buf, ubuf, cnt))
4075 return -EFAULT;
4076
4077 buf[cnt] = 0;
4078
4079 /* strip ending whitespace. */
4080 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4081 buf[i] = 0;
4082
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004083 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004084 if (err)
4085 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004086
Jiri Olsacf8517c2009-10-23 19:36:16 -04004087 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004088
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004089 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004090}
4091
4092static ssize_t
4093tracing_max_lat_read(struct file *filp, char __user *ubuf,
4094 size_t cnt, loff_t *ppos)
4095{
4096 unsigned long *ptr = filp->private_data;
4097 char buf[64];
4098 int r;
4099
Steven Rostedtcffae432008-05-12 21:21:00 +02004100 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004101 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02004102 if (r > sizeof(buf))
4103 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004104 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004105}
4106
4107static ssize_t
4108tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4109 size_t cnt, loff_t *ppos)
4110{
Hannes Eder5e398412009-02-10 19:44:34 +01004111 unsigned long *ptr = filp->private_data;
Hannes Eder5e398412009-02-10 19:44:34 +01004112 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004113 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004114
Peter Huewe22fe9b52011-06-07 21:58:27 +02004115 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4116 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004117 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004118
4119 *ptr = val * 1000;
4120
4121 return cnt;
4122}
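
/*
 * Unit convention, as implemented above: the file is read and written
 * in microseconds while the backing variable holds nanoseconds, hence
 * nsecs_to_usecs() on read and "val * 1000" on write. A hedged example
 * (file name assumed to be the usual "tracing_max_latency"):
 *
 *	echo 0 > tracing_max_latency
 */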
4123
Steven Rostedtb3806b42008-05-12 21:20:46 +02004124static int tracing_open_pipe(struct inode *inode, struct file *filp)
4125{
Oleg Nesterov15544202013-07-23 17:25:57 +02004126 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004127 struct trace_iterator *iter;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004128 int ret = 0;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004129
4130 if (tracing_disabled)
4131 return -ENODEV;
4132
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004133 if (trace_array_get(tr) < 0)
4134 return -ENODEV;
4135
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004136 mutex_lock(&trace_types_lock);
4137
Steven Rostedtb3806b42008-05-12 21:20:46 +02004138 /* create a buffer to store the information to pass to userspace */
4139 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004140 if (!iter) {
4141 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004142 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004143 goto out;
4144 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004145
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004146 /*
4147 * We make a copy of the current tracer to avoid concurrent
4148	 * changes to it while we are reading.
4149 */
4150 iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
4151 if (!iter->trace) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004152 ret = -ENOMEM;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004153 goto fail;
4154 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004155 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004156
4157 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4158 ret = -ENOMEM;
4159 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10304160 }
4161
Steven Rostedta3097202008-11-07 22:36:02 -05004162 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10304163 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05004164
Steven Rostedt112f38a72009-06-01 15:16:05 -04004165 if (trace_flags & TRACE_ITER_LATENCY_FMT)
4166 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4167
David Sharp8be07092012-11-13 12:18:22 -08004168 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09004169 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08004170 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4171
Oleg Nesterov15544202013-07-23 17:25:57 +02004172 iter->tr = tr;
4173 iter->trace_buffer = &tr->trace_buffer;
4174 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004175 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004176 filp->private_data = iter;
4177
Steven Rostedt107bad82008-05-12 21:21:01 +02004178 if (iter->trace->pipe_open)
4179 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02004180
Arnd Bergmannb4447862010-07-07 23:40:11 +02004181 nonseekable_open(inode, filp);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004182out:
4183 mutex_unlock(&trace_types_lock);
4184 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004185
4186fail:
4187 kfree(iter->trace);
4188 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004189 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004190 mutex_unlock(&trace_types_lock);
4191 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004192}
4193
4194static int tracing_release_pipe(struct inode *inode, struct file *file)
4195{
4196 struct trace_iterator *iter = file->private_data;
Oleg Nesterov15544202013-07-23 17:25:57 +02004197 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004198
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004199 mutex_lock(&trace_types_lock);
4200
Steven Rostedt29bf4a52009-12-09 12:37:43 -05004201 if (iter->trace->pipe_close)
Steven Rostedtc521efd2009-12-07 09:06:24 -05004202 iter->trace->pipe_close(iter);
4203
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004204 mutex_unlock(&trace_types_lock);
4205
Rusty Russell44623442009-01-01 10:12:23 +10304206 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004207 mutex_destroy(&iter->mutex);
4208 kfree(iter->trace);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004209 kfree(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004210
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004211 trace_array_put(tr);
4212
Steven Rostedtb3806b42008-05-12 21:20:46 +02004213 return 0;
4214}
4215
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004216static unsigned int
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004217trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004218{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004219	/* Iterators are static; they should be either filled or empty */
4220 if (trace_buffer_iter(iter, iter->cpu_file))
4221 return POLLIN | POLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004222
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004223 if (trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004224 /*
4225 * Always select as readable when in blocking mode
4226 */
4227 return POLLIN | POLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004228 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004229 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004230 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004231}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004232
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004233static unsigned int
4234tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4235{
4236 struct trace_iterator *iter = filp->private_data;
4237
4238 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004239}
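
/*
 * Userspace sketch (illustrative only): the poll support above lets a
 * trace_pipe reader sleep until data arrives instead of spinning:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		n = read(fd, buf, sizeof(buf));
 */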
4240
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004241/* Must be called with trace_types_lock mutex held. */
4242static int tracing_wait_pipe(struct file *filp)
4243{
4244 struct trace_iterator *iter = filp->private_data;
4245
4246 while (trace_empty(iter)) {
4247
4248 if ((filp->f_flags & O_NONBLOCK)) {
4249 return -EAGAIN;
4250 }
4251
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004252 /*
Liu Bo250bfd32013-01-14 10:54:11 +08004253		 * We block until we have read something and tracing is disabled.
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004254		 * We still block if tracing is disabled but we have never
4255 * read anything. This allows a user to cat this file, and
4256 * then enable tracing. But after we have read something,
4257 * we give an EOF when tracing is again disabled.
4258 *
4259 * iter->pos will be 0 if we haven't read anything.
4260 */
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04004261 if (!tracing_is_on() && iter->pos)
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004262 break;
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04004263
4264 mutex_unlock(&iter->mutex);
4265
Steven Rostedt (Red Hat)b1169cc2014-04-29 17:54:37 -04004266 wait_on_pipe(iter);
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04004267
4268 mutex_lock(&iter->mutex);
4269
4270 if (signal_pending(current))
4271 return -EINTR;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004272 }
4273
4274 return 1;
4275}
4276
Steven Rostedtb3806b42008-05-12 21:20:46 +02004277/*
4278 * Consumer reader.
4279 */
4280static ssize_t
4281tracing_read_pipe(struct file *filp, char __user *ubuf,
4282 size_t cnt, loff_t *ppos)
4283{
4284 struct trace_iterator *iter = filp->private_data;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004285 struct trace_array *tr = iter->tr;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004286 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004287
4288 /* return any leftover data */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004289 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4290 if (sret != -EBUSY)
4291 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004292
Steven Rostedtf9520752009-03-02 14:04:40 -05004293 trace_seq_init(&iter->seq);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004294
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004295 /* copy the tracer to avoid using a global lock all around */
Steven Rostedt107bad82008-05-12 21:21:01 +02004296 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004297 if (unlikely(iter->trace->name != tr->current_trace->name))
4298 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004299 mutex_unlock(&trace_types_lock);
4300
4301 /*
4302 * Avoid more than one consumer on a single file descriptor
4303	 * This is just a matter of trace coherency; the ring buffer itself
4304 * is protected.
4305 */
4306 mutex_lock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02004307 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004308 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4309 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02004310 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02004311 }
4312
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004313waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004314 sret = tracing_wait_pipe(filp);
4315 if (sret <= 0)
4316 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004317
4318 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004319 if (trace_empty(iter)) {
4320 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02004321 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004322 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004323
4324 if (cnt >= PAGE_SIZE)
4325 cnt = PAGE_SIZE - 1;
4326
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004327 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004328 memset(&iter->seq, 0,
4329 sizeof(struct trace_iterator) -
4330 offsetof(struct trace_iterator, seq));
Andrew Vagined5467d2013-08-02 21:16:43 +04004331 cpumask_clear(iter->started);
Steven Rostedt4823ed72008-05-12 21:21:01 +02004332 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004333
Lai Jiangshan4f535962009-05-18 19:35:34 +08004334 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004335 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05004336 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004337 enum print_line_t ret;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004338 int len = iter->seq.len;
4339
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004340 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004341 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02004342 /* don't print partial lines */
4343 iter->seq.len = len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004344 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004345 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01004346 if (ret != TRACE_TYPE_NO_CONSUME)
4347 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004348
4349 if (iter->seq.len >= cnt)
4350 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01004351
4352 /*
4353 * Setting the full flag means we reached the trace_seq buffer
4354		 * size and we should have left via the partial output condition above.
4355 * One of the trace_seq_* functions is not used properly.
4356 */
4357 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4358 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004359 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004360 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004361 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02004362
Steven Rostedtb3806b42008-05-12 21:20:46 +02004363 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004364 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4365 if (iter->seq.readpos >= iter->seq.len)
Steven Rostedtf9520752009-03-02 14:04:40 -05004366 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004367
4368 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004369	 * If there was nothing to send to the user, despite consuming trace
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004370 * entries, go back to wait for more entries.
4371 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004372 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004373 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004374
Steven Rostedt107bad82008-05-12 21:21:01 +02004375out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004376 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02004377
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004378 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004379}
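
/*
 * Minimal consumer sketch (userspace, path assumed): trace_pipe reads
 * are destructive -- trace_consume() removes each entry as it is
 * printed -- so a typical reader just loops on read(2):
 *
 *	int fd = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);
 *	ssize_t n;
 *
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		fwrite(buf, 1, n, stdout);
 */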
4380
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004381static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4382 unsigned int idx)
4383{
4384 __free_page(spd->pages[idx]);
4385}
4386
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08004387static const struct pipe_buf_operations tracing_pipe_buf_ops = {
Steven Rostedt34cd4992009-02-09 12:06:29 -05004388 .can_merge = 0,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004389 .confirm = generic_pipe_buf_confirm,
Al Viro92fdd982014-01-17 07:53:39 -05004390 .release = generic_pipe_buf_release,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004391 .steal = generic_pipe_buf_steal,
4392 .get = generic_pipe_buf_get,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004393};
4394
Steven Rostedt34cd4992009-02-09 12:06:29 -05004395static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01004396tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004397{
4398 size_t count;
4399 int ret;
4400
4401 /* Seq buffer is page-sized, exactly what we need. */
4402 for (;;) {
4403 count = iter->seq.len;
4404 ret = print_trace_line(iter);
4405 count = iter->seq.len - count;
4406 if (rem < count) {
4407 rem = 0;
4408 iter->seq.len -= count;
4409 break;
4410 }
4411 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4412 iter->seq.len -= count;
4413 break;
4414 }
4415
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08004416 if (ret != TRACE_TYPE_NO_CONSUME)
4417 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05004418 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05004419 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05004420 rem = 0;
4421 iter->ent = NULL;
4422 break;
4423 }
4424 }
4425
4426 return rem;
4427}
4428
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004429static ssize_t tracing_splice_read_pipe(struct file *filp,
4430 loff_t *ppos,
4431 struct pipe_inode_info *pipe,
4432 size_t len,
4433 unsigned int flags)
4434{
Jens Axboe35f3d142010-05-20 10:43:18 +02004435 struct page *pages_def[PIPE_DEF_BUFFERS];
4436 struct partial_page partial_def[PIPE_DEF_BUFFERS];
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004437 struct trace_iterator *iter = filp->private_data;
4438 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02004439 .pages = pages_def,
4440 .partial = partial_def,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004441 .nr_pages = 0, /* This gets updated below. */
Eric Dumazet047fe362012-06-12 15:24:40 +02004442 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004443 .flags = flags,
4444 .ops = &tracing_pipe_buf_ops,
4445 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004446 };
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004447 struct trace_array *tr = iter->tr;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004448 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004449 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004450 unsigned int i;
4451
Jens Axboe35f3d142010-05-20 10:43:18 +02004452 if (splice_grow_spd(pipe, &spd))
4453 return -ENOMEM;
4454
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004455 /* copy the tracer to avoid using a global lock all around */
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004456 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004457 if (unlikely(iter->trace->name != tr->current_trace->name))
4458 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004459 mutex_unlock(&trace_types_lock);
4460
4461 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004462
4463 if (iter->trace->splice_read) {
4464 ret = iter->trace->splice_read(iter, filp,
4465 ppos, pipe, len, flags);
4466 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004467 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004468 }
4469
4470 ret = tracing_wait_pipe(filp);
4471 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004472 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004473
Jason Wessel955b61e2010-08-05 09:22:23 -05004474 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004475 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004476 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004477 }
4478
Lai Jiangshan4f535962009-05-18 19:35:34 +08004479 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004480 trace_access_lock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004481
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004482 /* Fill as many pages as possible. */
Al Viroa786c062014-04-11 12:01:03 -04004483 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
Jens Axboe35f3d142010-05-20 10:43:18 +02004484 spd.pages[i] = alloc_page(GFP_KERNEL);
4485 if (!spd.pages[i])
Steven Rostedt34cd4992009-02-09 12:06:29 -05004486 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004487
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01004488 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004489
4490 /* Copy the data into the page, so we can start over. */
4491 ret = trace_seq_to_buffer(&iter->seq,
Jens Axboe35f3d142010-05-20 10:43:18 +02004492 page_address(spd.pages[i]),
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004493 iter->seq.len);
4494 if (ret < 0) {
Jens Axboe35f3d142010-05-20 10:43:18 +02004495 __free_page(spd.pages[i]);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004496 break;
4497 }
Jens Axboe35f3d142010-05-20 10:43:18 +02004498 spd.partial[i].offset = 0;
4499 spd.partial[i].len = iter->seq.len;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004500
Steven Rostedtf9520752009-03-02 14:04:40 -05004501 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004502 }
4503
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004504 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004505 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004506 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004507
4508 spd.nr_pages = i;
4509
Jens Axboe35f3d142010-05-20 10:43:18 +02004510 ret = splice_to_pipe(pipe, &spd);
4511out:
Eric Dumazet047fe362012-06-12 15:24:40 +02004512 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02004513 return ret;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004514
Steven Rostedt34cd4992009-02-09 12:06:29 -05004515out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004516 mutex_unlock(&iter->mutex);
Jens Axboe35f3d142010-05-20 10:43:18 +02004517 goto out;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004518}
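
/*
 * Userspace counterpart (illustrative, under the assumption that the
 * reader has already created a pipe with pipe(2)): the splice path
 * above hands whole pages to the pipe, avoiding the extra copy that
 * read(2) would make:
 *
 *	ssize_t n = splice(trace_fd, NULL, pipefd[1], NULL, 65536, 0);
 */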
4519
Steven Rostedta98a3c32008-05-12 21:20:59 +02004520static ssize_t
4521tracing_entries_read(struct file *filp, char __user *ubuf,
4522 size_t cnt, loff_t *ppos)
4523{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004524 struct inode *inode = file_inode(filp);
4525 struct trace_array *tr = inode->i_private;
4526 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004527 char buf[64];
4528 int r = 0;
4529 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004530
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004531 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004532
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004533 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004534 int cpu, buf_size_same;
4535 unsigned long size;
4536
4537 size = 0;
4538 buf_size_same = 1;
4539 /* check if all cpu sizes are same */
4540 for_each_tracing_cpu(cpu) {
4541 /* fill in the size from first enabled cpu */
4542 if (size == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004543 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4544 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004545 buf_size_same = 0;
4546 break;
4547 }
4548 }
4549
4550 if (buf_size_same) {
4551 if (!ring_buffer_expanded)
4552 r = sprintf(buf, "%lu (expanded: %lu)\n",
4553 size >> 10,
4554 trace_buf_size >> 10);
4555 else
4556 r = sprintf(buf, "%lu\n", size >> 10);
4557 } else
4558 r = sprintf(buf, "X\n");
4559 } else
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004560 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004561
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004562 mutex_unlock(&trace_types_lock);
4563
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004564 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4565 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004566}
4567
4568static ssize_t
4569tracing_entries_write(struct file *filp, const char __user *ubuf,
4570 size_t cnt, loff_t *ppos)
4571{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004572 struct inode *inode = file_inode(filp);
4573 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004574 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004575 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004576
Peter Huewe22fe9b52011-06-07 21:58:27 +02004577 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4578 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004579 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004580
4581 /* must have at least 1 entry */
4582 if (!val)
4583 return -EINVAL;
4584
Steven Rostedt1696b2b2008-11-13 00:09:35 -05004585 /* value is in KB */
4586 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004587 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004588 if (ret < 0)
4589 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004590
Jiri Olsacf8517c2009-10-23 19:36:16 -04004591 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004592
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004593 return cnt;
4594}
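
/*
 * Example usage (hedged; the file names are the standard tracefs
 * ones): writes are interpreted in KB, per the "val <<= 10" above, and
 * tracing_get_cpu() distinguishes the global file from per-cpu ones:
 *
 *	echo 4096 > buffer_size_kb			(all CPUs)
 *	echo 4096 > per_cpu/cpu0/buffer_size_kb		(one CPU)
 */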
Steven Rostedtbf5e6512008-11-10 21:46:00 -05004595
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004596static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004597tracing_total_entries_read(struct file *filp, char __user *ubuf,
4598 size_t cnt, loff_t *ppos)
4599{
4600 struct trace_array *tr = filp->private_data;
4601 char buf[64];
4602 int r, cpu;
4603 unsigned long size = 0, expanded_size = 0;
4604
4605 mutex_lock(&trace_types_lock);
4606 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004607 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004608 if (!ring_buffer_expanded)
4609 expanded_size += trace_buf_size >> 10;
4610 }
4611 if (ring_buffer_expanded)
4612 r = sprintf(buf, "%lu\n", size);
4613 else
4614 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
4615 mutex_unlock(&trace_types_lock);
4616
4617 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4618}
4619
4620static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004621tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
4622 size_t cnt, loff_t *ppos)
4623{
4624 /*
4625	 * There is no need to read what the user has written; this function
4626	 * exists just to make sure that there is no error when "echo" is used
4627 */
4628
4629 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004630
4631 return cnt;
4632}
4633
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004634static int
4635tracing_free_buffer_release(struct inode *inode, struct file *filp)
4636{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004637 struct trace_array *tr = inode->i_private;
4638
Steven Rostedtcf30cf62011-06-14 22:44:07 -04004639 /* disable tracing ? */
4640 if (trace_flags & TRACE_ITER_STOP_ON_FREE)
Alexander Z Lam711e1242013-08-02 18:36:15 -07004641 tracer_tracing_off(tr);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004642 /* resize the ring buffer to 0 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004643 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004644
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004645 trace_array_put(tr);
4646
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004647 return 0;
4648}
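
/*
 * Usage note (a hedged shell sketch): the buffer is freed on the last
 * close of "free_buffer", so a script can hold the file open for the
 * lifetime of a tracing session:
 *
 *	exec 3>free_buffer
 *	... run the workload ...
 *	exec 3>&-		(buffer shrinks to 0 here)
 */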
4649
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004650static ssize_t
4651tracing_mark_write(struct file *filp, const char __user *ubuf,
4652 size_t cnt, loff_t *fpos)
4653{
Steven Rostedtd696b582011-09-22 11:50:27 -04004654 unsigned long addr = (unsigned long)ubuf;
Alexander Z Lam2d716192013-07-01 15:31:24 -07004655 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04004656 struct ring_buffer_event *event;
4657 struct ring_buffer *buffer;
4658 struct print_entry *entry;
4659 unsigned long irq_flags;
4660 struct page *pages[2];
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004661 void *map_page[2];
Steven Rostedtd696b582011-09-22 11:50:27 -04004662 int nr_pages = 1;
4663 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04004664 int offset;
4665 int size;
4666 int len;
4667 int ret;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004668 int i;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004669
Steven Rostedtc76f0692008-11-07 22:36:02 -05004670 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004671 return -EINVAL;
4672
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07004673 if (!(trace_flags & TRACE_ITER_MARKERS))
4674 return -EINVAL;
4675
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004676 if (cnt > TRACE_BUF_SIZE)
4677 cnt = TRACE_BUF_SIZE;
4678
Steven Rostedtd696b582011-09-22 11:50:27 -04004679 /*
4680 * Userspace is injecting traces into the kernel trace buffer.
4681	 * We want to be as non-intrusive as possible.
4682 * To do so, we do not want to allocate any special buffers
4683 * or take any locks, but instead write the userspace data
4684 * straight into the ring buffer.
4685 *
4686	 * First we need to pin the userspace buffer into memory. It most
4687	 * likely already is, because userspace just referenced it, but
4688	 * there's no guarantee that it is. By using get_user_pages_fast()
4689 * and kmap_atomic/kunmap_atomic() we can get access to the
4690 * pages directly. We then write the data directly into the
4691 * ring buffer.
4692 */
4693 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004694
Steven Rostedtd696b582011-09-22 11:50:27 -04004695 /* check if we cross pages */
4696 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
4697 nr_pages = 2;
4698
4699 offset = addr & (PAGE_SIZE - 1);
4700 addr &= PAGE_MASK;
4701
4702 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
4703 if (ret < nr_pages) {
4704 while (--ret >= 0)
4705 put_page(pages[ret]);
4706 written = -EFAULT;
4707 goto out;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004708 }
4709
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004710 for (i = 0; i < nr_pages; i++)
4711 map_page[i] = kmap_atomic(pages[i]);
Steven Rostedtd696b582011-09-22 11:50:27 -04004712
4713 local_save_flags(irq_flags);
4714 size = sizeof(*entry) + cnt + 2; /* possible \n added */
Alexander Z Lam2d716192013-07-01 15:31:24 -07004715 buffer = tr->trace_buffer.buffer;
Steven Rostedtd696b582011-09-22 11:50:27 -04004716 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
4717 irq_flags, preempt_count());
4718 if (!event) {
4719 /* Ring buffer disabled, return as if not open for write */
4720 written = -EBADF;
4721 goto out_unlock;
4722 }
4723
4724 entry = ring_buffer_event_data(event);
4725 entry->ip = _THIS_IP_;
4726
4727 if (nr_pages == 2) {
4728 len = PAGE_SIZE - offset;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004729 memcpy(&entry->buf, map_page[0] + offset, len);
4730 memcpy(&entry->buf[len], map_page[1], cnt - len);
Steven Rostedtd696b582011-09-22 11:50:27 -04004731 } else
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004732 memcpy(&entry->buf, map_page[0] + offset, cnt);
Steven Rostedtd696b582011-09-22 11:50:27 -04004733
4734 if (entry->buf[cnt - 1] != '\n') {
4735 entry->buf[cnt] = '\n';
4736 entry->buf[cnt + 1] = '\0';
4737 } else
4738 entry->buf[cnt] = '\0';
4739
Steven Rostedt7ffbd482012-10-11 12:14:25 -04004740 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04004741
4742 written = cnt;
4743
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02004744 *fpos += written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004745
Steven Rostedtd696b582011-09-22 11:50:27 -04004746 out_unlock:
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004747 for (i = 0; i < nr_pages; i++){
4748 kunmap_atomic(map_page[i]);
4749 put_page(pages[i]);
4750 }
Steven Rostedtd696b582011-09-22 11:50:27 -04004751 out:
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02004752 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004753}
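
/*
 * Userspace sketch (path assumed): a single write(2) to trace_marker
 * injects one marker event, which is why the code above goes to such
 * lengths to avoid allocations and locks on this path:
 *
 *	int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);
 *	write(fd, "hello from userspace", 20);
 */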
4754
Li Zefan13f16d22009-12-08 11:16:11 +08004755static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08004756{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004757 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08004758 int i;
4759
4760 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08004761 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08004762 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004763 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
4764 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08004765 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08004766
Li Zefan13f16d22009-12-08 11:16:11 +08004767 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08004768}
4769
Steven Rostedte1e232c2014-02-10 23:38:46 -05004770static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
Zhaolei5079f322009-08-25 16:12:56 +08004771{
Zhaolei5079f322009-08-25 16:12:56 +08004772 int i;
4773
Zhaolei5079f322009-08-25 16:12:56 +08004774 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
4775 if (strcmp(trace_clocks[i].name, clockstr) == 0)
4776 break;
4777 }
4778 if (i == ARRAY_SIZE(trace_clocks))
4779 return -EINVAL;
4780
Zhaolei5079f322009-08-25 16:12:56 +08004781 mutex_lock(&trace_types_lock);
4782
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004783 tr->clock_id = i;
4784
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004785 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08004786
David Sharp60303ed2012-10-11 16:27:52 -07004787 /*
4788	 * The new clock may not be consistent with the previous clock.
4789 * Reset the buffer so that it doesn't have incomparable timestamps.
4790 */
Alexander Z Lam94571582013-08-02 18:36:16 -07004791 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004792
4793#ifdef CONFIG_TRACER_MAX_TRACE
4794 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
4795 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
Alexander Z Lam94571582013-08-02 18:36:16 -07004796 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004797#endif
David Sharp60303ed2012-10-11 16:27:52 -07004798
Zhaolei5079f322009-08-25 16:12:56 +08004799 mutex_unlock(&trace_types_lock);
4800
Steven Rostedte1e232c2014-02-10 23:38:46 -05004801 return 0;
4802}
4803
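/*
 * Write handler for the "trace_clock" file: selects a clock by name,
 * e.g. "echo global > trace_clock" from the tracing debugfs directory.
 */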
static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *fpos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	const char *clockstr;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	clockstr = strstrip(buf);

	ret = tracing_set_clock(tr, clockstr);
	if (ret)
		return ret;

	*fpos += cnt;

	return cnt;
}

static int tracing_clock_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr))
		return -ENODEV;

	ret = single_open(file, tracing_clock_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

struct ftrace_buffer_info {
	struct trace_iterator	iter;
	void			*spare;
	unsigned int		read;
};

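/*
 * The "snapshot" file: opens for read go through the normal trace open
 * code but against the max (snapshot) buffer, while write-only opens
 * get a stub seq_file that merely carries the iterator used by the
 * write handler below.
 */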
#ifdef CONFIG_TRACER_SNAPSHOT
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		ret = -ENOMEM;
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
			goto out;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
			goto out;
		}
		ret = 0;

		iter->tr = tr;
		iter->trace_buffer = &tr->max_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		m->private = iter;
		file->private_data = m;
	}
out:
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

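/*
 * Values written to the "snapshot" file, e.g. "echo 1 > snapshot":
 *	0 - free the snapshot buffer (only valid on the all-CPU file)
 *	1 - allocate the snapshot buffer if necessary and take a
 *	    snapshot (swap the live buffer with the snapshot buffer)
 *	else - clear the snapshot contents without freeing the buffer
 */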
static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	switch (val) {
	case 0:
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (!tr->allocated_snapshot) {
			ret = alloc_snapshot(tr);
			if (ret < 0)
				break;
		}
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id());
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}

static int tracing_snapshot_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	int ret;

	ret = tracing_release(inode, file);

	if (file->f_mode & FMODE_READ)
		return ret;

	/* If write only, the seq_file is just a stub */
	if (m)
		kfree(m->private);
	kfree(m);

	return 0;
}

static int tracing_buffers_open(struct inode *inode, struct file *filp);
static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
				    size_t count, loff_t *ppos);
static int tracing_buffers_release(struct inode *inode, struct file *file);
static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);

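/*
 * "snapshot_raw" reuses the trace_pipe_raw open path and then points
 * the iterator at the max buffer, so a snapshot can be read out in the
 * binary ring-buffer page format.
 */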
static int snapshot_raw_open(struct inode *inode, struct file *filp)
{
	struct ftrace_buffer_info *info;
	int ret;

	ret = tracing_buffers_open(inode, filp);
	if (ret < 0)
		return ret;

	info = filp->private_data;

	if (info->iter.trace->use_max_tr) {
		tracing_buffers_release(inode, filp);
		return -EBUSY;
	}

	info->iter.snapshot = true;
	info->iter.trace_buffer = &info->iter.tr->max_buffer;

	return ret;
}

#endif /* CONFIG_TRACER_SNAPSHOT */


static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};

static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_free_buffer_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};

static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_clock_write,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};

static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */

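/*
 * Open handler for the per-cpu "trace_pipe_raw" file (also reused by
 * "snapshot_raw" above), which exposes raw ring-buffer pages for a
 * single CPU; the CPU is encoded in the inode by trace_create_cpu_file().
 */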
static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct ftrace_buffer_info *info;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	mutex_lock(&trace_types_lock);

	info->iter.tr = tr;
	info->iter.cpu_file = tracing_get_cpu(inode);
	info->iter.trace = tr->current_trace;
	info->iter.trace_buffer = &tr->trace_buffer;
	info->spare = NULL;
	/* Force reading ring buffer for first read */
	info->read = (unsigned int)-1;

	filp->private_data = info;

	mutex_unlock(&trace_types_lock);

	ret = nonseekable_open(inode, filp);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static unsigned int
tracing_buffers_poll(struct file *filp, poll_table *poll_table)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;

	return trace_poll(iter, filp, poll_table);
}

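/*
 * Read whole pages out of the ring buffer: a page is read into the
 * spare page and then copied to user space, possibly across several
 * calls.  Blocks until data is available unless opened O_NONBLOCK.
 */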
static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;
	ssize_t ret;
	ssize_t size;

	if (!count)
		return 0;

	mutex_lock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
		size = -EBUSY;
		goto out_unlock;
	}
#endif

	if (!info->spare)
		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
							  iter->cpu_file);
	size = -ENOMEM;
	if (!info->spare)
		goto out_unlock;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

 again:
	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
				    &info->spare,
				    count,
				    iter->cpu_file, 0);
	trace_access_unlock(iter->cpu_file);

	if (ret < 0) {
		if (trace_empty(iter)) {
			if ((filp->f_flags & O_NONBLOCK)) {
				size = -EAGAIN;
				goto out_unlock;
			}
			mutex_unlock(&trace_types_lock);
			wait_on_pipe(iter);
			mutex_lock(&trace_types_lock);
			if (signal_pending(current)) {
				size = -EINTR;
				goto out_unlock;
			}
			goto again;
		}
		size = 0;
		goto out_unlock;
	}

	info->read = 0;
 read:
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size) {
		size = -EFAULT;
		goto out_unlock;
	}
	size -= ret;

	*ppos += size;
	info->read += size;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return size;
}

static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	__trace_array_put(iter->tr);

	if (info->spare)
		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
	kfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}

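/*
 * A counted reference on a ring-buffer page handed to splice().  The
 * page must stay valid until the pipe is done with it, which can be
 * long after the file itself has been released.
 */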
struct buffer_ref {
	struct ring_buffer	*buffer;
	void			*page;
	int			ref;
};

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	buf->private = 0;
}

static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ref->ref++;
}

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge		= 0,
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,
};

/*
 * Callback from splice_to_pipe(): release any pages left in the spd
 * in case we errored out while filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	spd->partial[i].private = 0;
}

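/*
 * Splice ring-buffer pages into a pipe without copying.  Both *ppos and
 * len must be page aligned (a short len is rejected, a longer unaligned
 * one is rounded down), and each spliced page carries a buffer_ref so
 * that it is returned to the ring buffer when the pipe drops it.
 */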
static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, size, i;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}
#endif

	if (splice_grow_spd(pipe, &spd)) {
		ret = -ENOMEM;
		goto out;
	}

	if (*ppos & (PAGE_SIZE - 1)) {
		ret = -EINVAL;
		goto out;
	}

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE) {
			ret = -EINVAL;
			goto out;
		}
		len &= PAGE_MASK;
	}

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref)
			break;

		ref->ref = 1;
		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (!ref->page) {
			kfree(ref);
			break;
		}

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->page);
			kfree(ref);
			break;
		}

		/*
		 * Zero out any leftover data; this is going to
		 * user land.
		 */
		size = ring_buffer_page_len(ref->page);
		if (size < PAGE_SIZE)
			memset(ref->page + size, 0, PAGE_SIZE - size);

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
			ret = -EAGAIN;
			goto out;
		}
		mutex_unlock(&trace_types_lock);
		wait_on_pipe(iter);
		mutex_lock(&trace_types_lock);
		if (signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
	splice_shrink_spd(&spd);
out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

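/*
 * The per-cpu "stats" file: reports the entry, overrun, byte and
 * dropped/read event counts plus the event timestamps for one CPU's
 * buffer, e.g. "cat per_cpu/cpu0/stats".
 */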
static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
				 t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				 ring_buffer_time_stamp(trace_buf->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);

	kfree(s);

	return count;
}

static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

#ifdef CONFIG_DYNAMIC_FTRACE

int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
	int r;

	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	r += ftrace_arch_read_dyn_info(buf + r, (size - 1) - r);
	buf[r++] = '\n';

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);

	return r;
}

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
#endif /* CONFIG_DYNAMIC_FTRACE */

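/*
 * The "snapshot" function probe: take a snapshot every time one of the
 * listed functions is hit, optionally only a limited number of times.
 * E.g. (the second form stops after five snapshots):
 *
 *	echo 'do_fork:snapshot' > set_ftrace_filter
 *	echo 'do_fork:snapshot:5' > set_ftrace_filter
 */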
#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	tracing_snapshot();
}

static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	unsigned long *count = (long *)data;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_snapshot();
}

static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	seq_printf(m, "snapshot");

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,
};

static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,
};

static int
ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	if (ret >= 0)
		alloc_snapshot(&global_trace);

	return ret < 0 ? ret : 0;
}

static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name			= "snapshot",
	.func			= ftrace_trace_snapshot_callback,
};

static __init int register_snapshot_cmd(void)
{
	return register_ftrace_command(&ftrace_snapshot_cmd);
}
#else
static inline __init int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */

struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
{
	if (tr->dir)
		return tr->dir;

	if (!debugfs_initialized())
		return NULL;

	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		tr->dir = debugfs_create_dir("tracing", NULL);

	if (!tr->dir)
		pr_warn_once("Could not create debugfs directory 'tracing'\n");

	return tr->dir;
}

struct dentry *tracing_init_dentry(void)
{
	return tracing_init_dentry_tr(&global_trace);
}

static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
	struct dentry *d_tracer;

	if (tr->percpu_dir)
		return tr->percpu_dir;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);

	WARN_ONCE(!tr->percpu_dir,
		  "Could not create debugfs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;
}

static struct dentry *
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
		      void *data, long cpu, const struct file_operations *fops)
{
	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);

	if (ret) /* See tracing_get_cpu() */
		ret->d_inode->i_cdev = (void *)(cpu + 1);
	return ret;
}

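/*
 * Create the per_cpu/cpuN/ directory for one CPU, holding that CPU's
 * trace, trace_pipe, trace_pipe_raw, stats and buffer_size_kb files
 * (plus snapshot and snapshot_raw when the snapshot is configured in).
 */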
static void
tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
				tr, cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_cpu_file("trace", 0644, d_cpu,
				tr, cpu, &tracing_fops);

	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
				tr, cpu, &tracing_buffers_fops);

	trace_create_cpu_file("stats", 0444, d_cpu,
				tr, cpu, &tracing_stats_fops);

	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
				tr, cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_cpu_file("snapshot", 0644, d_cpu,
				tr, cpu, &snapshot_fops);

	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
				tr, cpu, &snapshot_raw_fops);
#endif
}

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};

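/*
 * The files under options/ each control a single option bit: reading
 * returns "0" or "1", and writing one of those values clears or sets
 * the option, e.g. "echo 1 > options/<option>".
 */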
static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}


static const struct file_operations trace_options_fops = {
	.open		= tracing_open_generic,
	.read		= trace_options_read,
	.write		= trace_options_write,
	.llseek		= generic_file_llseek,
};

static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	long index = (long)filp->private_data;
	char *buf;

	if (trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	struct trace_array *tr = &global_trace;
	long index = (long)filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open		= tracing_open_generic,
	.read		= trace_options_core_read,
	.write		= trace_options_core_write,
	.llseek		= generic_file_llseek,
};

struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = debugfs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warning("Could not create debugfs '%s' entry\n", name);

	return ret;
}


static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->options = debugfs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warning("Could not create debugfs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}

static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);

}

static struct trace_option_dentry *
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;

	if (!tracer)
		return NULL;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return NULL;

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return NULL;

	for (cnt = 0; opts[cnt].name; cnt++)
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);

	return topts;
}

static void
destroy_trace_option_files(struct trace_option_dentry *topts)
{
	int cnt;

	if (!topts)
		return;

	for (cnt = 0; topts[cnt].opt; cnt++) {
		if (topts[cnt].entry)
			debugfs_remove(topts[cnt].entry);
	}

	kfree(topts);
}

static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options, (void *)index,
				 &trace_options_core_fops);
}

static __init void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++)
		create_trace_option_core_file(tr, trace_options[i], i);
}

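/*
 * The "tracing_on" file: writing 0 turns the ring buffer off and calls
 * the current tracer's ->stop() callback, writing anything else turns
 * it back on via ->start(); reading returns the current state.
 */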
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic_tr,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};

struct dentry *trace_instance_dir;

static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);

Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006046static int
6047allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
Steven Rostedt277ba042012-08-03 16:10:49 -04006048{
6049 enum ring_buffer_flags rb_flags;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006050
6051 rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
6052
Steven Rostedt (Red Hat)dced3412014-01-14 10:19:46 -05006053 buf->tr = tr;
6054
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006055 buf->buffer = ring_buffer_alloc(size, rb_flags);
6056 if (!buf->buffer)
6057 return -ENOMEM;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006058
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006059 buf->data = alloc_percpu(struct trace_array_cpu);
6060 if (!buf->data) {
6061 ring_buffer_free(buf->buffer);
6062 return -ENOMEM;
6063 }
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006064
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006065 /* Allocate the first page for all buffers */
6066 set_buffer_entries(&tr->trace_buffer,
6067 ring_buffer_size(tr->trace_buffer.buffer, 0));
6068
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006069 return 0;
6070}
6071
static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		ring_buffer_free(tr->trace_buffer.buffer);
		tr->trace_buffer.buffer = NULL;
		free_percpu(tr->trace_buffer.data);
		tr->trace_buffer.data = NULL;
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}
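
/*
 * Note (illustrative, not in the original file): for the top level
 * trace array, "size" ultimately comes from the trace_buf_size= boot
 * parameter (or the compiled-in default), and allocate_snapshot is
 * set by the alloc_snapshot boot parameter. For example, booting with:
 *
 *	trace_buf_size=4096k alloc_snapshot
 *
 * pre-sizes the per-cpu buffers and allocates the max/snapshot buffer
 * at full size instead of the single-page placeholder used above.
 */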

static int new_instance_create(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = debugfs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		debugfs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	init_tracer_debugfs(tr, tr->dir);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	if (tr->trace_buffer.buffer)
		ring_buffer_free(tr->trace_buffer.buffer);
	free_percpu(tr->trace_buffer.data);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static int instance_delete(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	ret = -EBUSY;
	if (tr->ref)
		goto out_unlock;

	list_del(&tr->list);

	tracing_set_nop(tr);
	event_trace_del_tracer(tr);
	ftrace_destroy_function_files(tr);
	debugfs_remove_recursive(tr->dir);
	free_percpu(tr->trace_buffer.data);
	ring_buffer_free(tr->trace_buffer.buffer);

	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static int instance_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/*
	 * The inode mutex is locked, but debugfs_create_dir() will also
	 * take the mutex. As the instances directory can not be destroyed
	 * or changed in any other way, it is safe to unlock it, and
	 * let the dentry try. If two users try to make the same dir at
	 * the same time, then new_instance_create() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = new_instance_create(dentry->d_iname);

	mutex_lock(&inode->i_mutex);

	return ret;
}

static int instance_rmdir(struct inode *inode, struct dentry *dentry)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/* The caller did a dget() on dentry */
	mutex_unlock(&dentry->d_inode->i_mutex);

	/*
	 * The inode mutex is locked, but debugfs_remove_recursive() will
	 * also take the mutex. As the instances directory can not be
	 * destroyed or changed in any other way, it is safe to unlock it,
	 * and let the dentry try. If two users try to remove the same dir
	 * at the same time, then instance_delete() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = instance_delete(dentry->d_iname);

	mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
	mutex_lock(&dentry->d_inode->i_mutex);

	return ret;
}

static const struct inode_operations instance_dir_inode_operations = {
	.lookup		= simple_lookup,
	.mkdir		= instance_mkdir,
	.rmdir		= instance_rmdir,
};

static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = debugfs_create_dir("instances", d_tracer);
	if (WARN_ON(!trace_instance_dir))
		return;

	/* Hijack the dir inode operations, to allow mkdir */
	trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
}
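
/*
 * Example usage (illustrative): with the inode operations hijacked
 * above, plain mkdir/rmdir from user space creates and destroys
 * complete trace arrays:
 *
 *	# mkdir /sys/kernel/debug/tracing/instances/foo
 *	# ls /sys/kernel/debug/tracing/instances/foo
 *	# rmdir /sys/kernel/debug/tracing/instances/foo
 *
 * The rmdir fails with EBUSY while the instance still has a reference
 * holder (see the tr->ref check in instance_delete()).
 */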

static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			  &tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_debugfs_percpu(tr, cpu);
}
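
/*
 * Illustrative example (assumes an instance named "foo" already
 * exists): since every trace array gets its own copies of the files
 * created above, an instance can be driven independently of the top
 * level buffer, e.g.:
 *
 *	# echo function > /sys/kernel/debug/tracing/instances/foo/current_tracer
 *	# echo 1 > /sys/kernel/debug/tracing/instances/foo/tracing_on
 *	# cat /sys/kernel/debug/tracing/instances/foo/trace_pipe
 */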

static __init int tracer_init_debugfs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	init_tracer_debugfs(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			  &tracing_thresh, &tracing_max_lat_fops);

	trace_create_file("README", 0444, d_tracer,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			  NULL, &tracing_saved_cmdlines_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			  &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	create_trace_options_dir(&global_trace);

	return 0;
}

static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call	= trace_panic_handler,
	.next		= NULL,
	.priority	= 150	/* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};
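
/*
 * Note (illustrative): both notifiers are gated on ftrace_dump_on_oops,
 * which can be set on the kernel command line or at run time:
 *
 *	ftrace_dump_on_oops		(boot: dump every CPU's buffer)
 *	ftrace_dump_on_oops=orig_cpu	(boot: dump only the oopsing CPU)
 *	# echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 */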

/*
 * printk is limited to a maximum of 1024 bytes; we really don't need
 * it that big. Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->len >= TRACE_MAX_PRINT)
		s->len = TRACE_MAX_PRINT;

	/* should be zero terminated, but we are paranoid. */
	s->buffer[s->len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}

void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
	}

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all that we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
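
/*
 * Usage sketch (illustrative): ftrace_dump() is exported as a last
 * resort debugging aid. Code that detects a fatal inconsistency can
 * dump the buffer of the current CPU with:
 *
 *	ftrace_dump(DUMP_ORIG);
 *
 * or every CPU's buffer with ftrace_dump(DUMP_ALL). The same dump can
 * be triggered by hand with sysrq-z, after which tracing must be
 * re-enabled through tracing_on as noted in the comment above.
 */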

__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* Used for event triggers */
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_free_cpumask;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_temp_buffer;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	trace_init_cmdlines();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warning("Trace clock %s not defined, going back to default\n",
				   trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	while (trace_boot_options) {
		char *option;

		option = strsep(&trace_boot_options, ",");
		trace_set_options(&global_trace, option);
	}

	register_snapshot_cmd();

	return 0;

out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_free_cpumask:
	free_percpu(global_trace.trace_buffer.data);
#ifdef CONFIG_TRACER_MAX_TRACE
	free_percpu(global_trace.max_buffer.data);
#endif
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
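
/*
 * Note (illustrative): the trace_boot_options loop above consumes the
 * trace_options= boot parameter, and trace_boot_clock is filled from
 * trace_clock=, so early boot behavior can be tuned with e.g.:
 *
 *	trace_options=stacktrace trace_clock=global
 *
 * before debugfs is even available.
 */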

__init static int clear_boot_tracer(void)
{
	/*
	 * The default bootup tracer name lives in an init section that
	 * will be freed after boot. This function is called at lateinit;
	 * if the boot tracer was never registered, clear the pointer out
	 * to prevent later registration from accessing memory that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);