/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will lurk into the ring-buffer to count the
 * entries inserted during the selftest, although some concurrent
 * insertions into the ring-buffer, such as trace_printk, could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly	tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered the oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
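/*
 * Illustrative example (not part of the original file): given the parser
 * above, passing "ftrace_dump_on_oops" alone on the kernel command line
 * selects DUMP_ALL, while "ftrace_dump_on_oops=orig_cpu" selects
 * DUMP_ORIG, dumping only the buffer of the CPU that triggered the oops.
 */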

static int __init stop_trace_on_warning(char *str)
{
	__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning=", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);


unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array	global_trace;

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}
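/*
 * Illustrative sketch (not part of the original file): a caller that
 * takes a reference with trace_array_get() must pair it with
 * trace_array_put() once done with the trace_array, e.g.:
 *
 *	if (trace_array_get(tr) == 0) {
 *		... use tr ...
 *		trace_array_put(tr);
 *	}
 */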

int filter_check_discard(struct ftrace_event_file *file, void *rec,
			 struct ring_buffer *buffer,
			 struct ring_buffer_event *event)
{
	if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(file->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(filter_check_discard);

int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(call_filter_check_discard);

static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been enabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek() etc.)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow another process to
 * consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multi process access to different cpu ring buffers
 * concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
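/*
 * Illustrative sketch (not part of the original file): a reader that
 * consumes events from one CPU's buffer brackets the access like:
 *
 *	trace_access_lock(cpu);
 *	... consume events from the @cpu ring buffer ...
 *	trace_access_unlock(cpu);
 *
 * Passing RING_BUFFER_ALL_CPUS instead takes the access lock
 * exclusively for the whole ring buffer.
 */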

/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;

static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, preempt_count());
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);
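/*
 * Illustrative note (not part of the original file): __trace_puts() is
 * normally reached through the trace_puts() helper macro rather than
 * called directly, e.g.:
 *
 *	trace_puts("entering critical section\n");
 *
 * which records the caller's instruction pointer along with the string.
 */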

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, preempt_count());
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
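/*
 * Illustrative sketch (not part of the original file): a caller that
 * wants to capture the trace when a rare condition fires, while tracing
 * continues, could do (assuming the snapshot buffer was allocated and
 * my_error_condition() is a hypothetical helper):
 *
 *	if (my_error_condition())
 *		tracing_snapshot();	// swap live buffer into snapshot
 */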

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it, because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);
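/*
 * Illustrative example (not part of the original file): because
 * memparse() is used above, the boot parameter accepts size suffixes,
 * e.g. "trace_buf_size=1M" requests a one-megabyte buffer size.
 */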

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	"record-cmd",
	"overwrite",
	"disable_on_free",
	"irq-info",
	"markers",
	"function-trace",
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;	/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,	"local",	1 },
	{ trace_clock_global,	"global",	1 },
	{ trace_clock_counter,	"counter",	0 },
	{ trace_clock_jiffies,	"uptime",	1 },
	{ trace_clock,		"perf",		1 },
	ARCH_TRACE_CLOCKS
};

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
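/*
 * Illustrative sketch (not part of the original file): a write() handler
 * typically drives the parser one space-separated token per call:
 *
 *	struct trace_parser parser;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	// on success, parser.buffer holds the NUL-terminated token
 *	trace_parser_put(&parser);
 */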

ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
	int len;
	int ret;

	if (!cnt)
		return 0;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
	if (ret == cnt)
		return -EFAULT;

	cnt -= ret;

	s->readpos += cnt;
	return cnt;
}

static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->readpos, cnt);

	s->readpos += cnt;
	return cnt;
}

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static void default_wait_pipe(struct trace_iterator *iter)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return;

	ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;
	if (!type->wait_pipe)
		type->wait_pipe = default_wait_pipe;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break them. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}
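/*
 * Illustrative sketch (not part of the original file): a minimal tracer
 * registration, typically done from an __init function. The tracer name
 * and callbacks here are hypothetical:
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "my_tracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 *	static __init int init_my_tracer(void)
 *	{
 *		return register_tracer(&my_tracer);
 *	}
 */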

void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}

#define SAVED_CMDLINES 128
#define NO_CMDLINE_MAP UINT_MAX
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;

/* temporarily disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;

static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}

int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

	ftrace_start();
 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_start_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	ftrace_stop();
	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (global_trace.stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001407 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001408 if (buffer)
1409 ring_buffer_record_disable(buffer);
1410
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001411#ifdef CONFIG_TRACER_MAX_TRACE
1412 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001413 if (buffer)
1414 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001415#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001416
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001417 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001418
Steven Rostedt0f048702008-11-05 16:05:44 -05001419 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001420 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1421}
1422
1423static void tracing_stop_tr(struct trace_array *tr)
1424{
1425 struct ring_buffer *buffer;
1426 unsigned long flags;
1427
1428 /* If global, we need to also stop the max tracer */
1429 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1430 return tracing_stop();
1431
1432 raw_spin_lock_irqsave(&tr->start_lock, flags);
1433 if (tr->stop_count++)
1434 goto out;
1435
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001436 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001437 if (buffer)
1438 ring_buffer_record_disable(buffer);
1439
1440 out:
1441 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001442}
1443
Ingo Molnare309b412008-05-12 21:20:51 +02001444void trace_stop_cmdline_recording(void);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001445
Ingo Molnare309b412008-05-12 21:20:51 +02001446static void trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001447{
Carsten Emdea635cf02009-03-18 09:00:41 +01001448 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001449
1450 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1451 return;
1452
1453 /*
1454 * It's not the end of the world if we don't get
1455 * the lock, but we also don't want to spin
1456 * nor do we want to disable interrupts,
1457 * so if we miss here, then better luck next time.
1458 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001459 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001460 return;
1461
1462 idx = map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001463 if (idx == NO_CMDLINE_MAP) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001464 idx = (cmdline_idx + 1) % SAVED_CMDLINES;
1465
Carsten Emdea635cf02009-03-18 09:00:41 +01001466 /*
1467 * Check whether the cmdline buffer at idx has a pid
1468 * mapped. We are going to overwrite that entry so we
1469 * need to clear the map_pid_to_cmdline entry. Otherwise we
1470 * would read the new comm for the old pid.
1471 */
1472 pid = map_cmdline_to_pid[idx];
1473 if (pid != NO_CMDLINE_MAP)
1474 map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001475
Carsten Emdea635cf02009-03-18 09:00:41 +01001476 map_cmdline_to_pid[idx] = tsk->pid;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001477 map_pid_to_cmdline[tsk->pid] = idx;
1478
1479 cmdline_idx = idx;
1480 }
1481
1482 memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
1483
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001484 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001485}
1486
Steven Rostedt4ca53082009-03-16 19:20:15 -04001487void trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001488{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001489 unsigned map;
1490
Steven Rostedt4ca53082009-03-16 19:20:15 -04001491 if (!pid) {
1492 strcpy(comm, "<idle>");
1493 return;
1494 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001495
Steven Rostedt74bf4072010-01-25 15:11:53 -05001496 if (WARN_ON_ONCE(pid < 0)) {
1497 strcpy(comm, "<XXX>");
1498 return;
1499 }
1500
Steven Rostedt4ca53082009-03-16 19:20:15 -04001501 if (pid > PID_MAX_DEFAULT) {
1502 strcpy(comm, "<...>");
1503 return;
1504 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001505
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001506 preempt_disable();
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001507 arch_spin_lock(&trace_cmdline_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001508 map = map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001509 if (map != NO_CMDLINE_MAP)
1510 strcpy(comm, saved_cmdlines[map]);
1511 else
1512 strcpy(comm, "<...>");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001513
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001514 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001515 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001516}
1517
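/*
 * trace_cmdline_save is a per-cpu flag set by __buffer_unlock_commit();
 * tracing_record_cmdline() clears it again, so a task's comm is saved
 * at most once per committed event rather than on every call.
 */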
Ingo Molnare309b412008-05-12 21:20:51 +02001518void tracing_record_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001519{
Steven Rostedt0fb96562012-05-11 14:25:30 -04001520 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001521 return;
1522
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001523 if (!__this_cpu_read(trace_cmdline_save))
1524 return;
1525
1526 __this_cpu_write(trace_cmdline_save, false);
1527
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001528 trace_save_cmdline(tsk);
1529}
1530
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03001531void
Steven Rostedt38697052008-10-01 13:14:09 -04001532tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1533 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001534{
1535 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001536
Steven Rostedt777e2082008-09-29 23:02:42 -04001537 entry->preempt_count = pc & 0xff;
1538 entry->pid = (tsk) ? tsk->pid : 0;
1539 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04001540#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04001541 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04001542#else
1543 TRACE_FLAG_IRQS_NOSUPPORT |
1544#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001545 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1546 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02001547 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1548 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001549}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02001550EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001551
Steven Rostedte77405a2009-09-02 14:17:06 -04001552struct ring_buffer_event *
1553trace_buffer_lock_reserve(struct ring_buffer *buffer,
1554 int type,
1555 unsigned long len,
1556 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001557{
1558 struct ring_buffer_event *event;
1559
Steven Rostedte77405a2009-09-02 14:17:06 -04001560 event = ring_buffer_lock_reserve(buffer, len);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001561 if (event != NULL) {
1562 struct trace_entry *ent = ring_buffer_event_data(event);
1563
1564 tracing_generic_entry_update(ent, flags, pc);
1565 ent->type = type;
1566 }
1567
1568 return event;
1569}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001570
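/*
 * A typical reserve/commit pair, modeled on trace_function() below
 * (sketch only; a failed reserve is simply dropped):
 *
 *	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
 *					  flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->ip = ip;
 *	__buffer_unlock_commit(buffer, event);
 */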
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001571void
1572__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1573{
1574 __this_cpu_write(trace_cmdline_save, true);
1575 ring_buffer_unlock_commit(buffer, event);
1576}
1577
Steven Rostedte77405a2009-09-02 14:17:06 -04001578static inline void
1579__trace_buffer_unlock_commit(struct ring_buffer *buffer,
1580 struct ring_buffer_event *event,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001581 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001582{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001583 __buffer_unlock_commit(buffer, event);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001584
Steven Rostedte77405a2009-09-02 14:17:06 -04001585 ftrace_trace_stack(buffer, flags, 6, pc);
1586 ftrace_trace_userstack(buffer, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001587}
1588
Steven Rostedte77405a2009-09-02 14:17:06 -04001589void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1590 struct ring_buffer_event *event,
1591 unsigned long flags, int pc)
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001592{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001593 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001594}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001595EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001596
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001597static struct ring_buffer *temp_buffer;
1598
Steven Rostedtef5580d2009-02-27 19:38:04 -05001599struct ring_buffer_event *
Steven Rostedtccb469a2012-08-02 10:32:10 -04001600trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1601 struct ftrace_event_file *ftrace_file,
1602 int type, unsigned long len,
1603 unsigned long flags, int pc)
1604{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001605 struct ring_buffer_event *entry;
1606
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001607 *current_rb = ftrace_file->tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001608 entry = trace_buffer_lock_reserve(*current_rb,
Steven Rostedtccb469a2012-08-02 10:32:10 -04001609 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001610 /*
1611 * If tracing is off, but we have triggers enabled
1612 * we still need to look at the event data. Use the temp_buffer
1613 * to store the trace event for the trigger to use. It's recursion
1614 * safe and will not be recorded anywhere.
1615 */
1616 if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
1617 *current_rb = temp_buffer;
1618 entry = trace_buffer_lock_reserve(*current_rb,
1619 type, len, flags, pc);
1620 }
1621 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04001622}
1623EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1624
1625struct ring_buffer_event *
Steven Rostedte77405a2009-09-02 14:17:06 -04001626trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1627 int type, unsigned long len,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001628 unsigned long flags, int pc)
1629{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001630 *current_rb = global_trace.trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04001631 return trace_buffer_lock_reserve(*current_rb,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001632 type, len, flags, pc);
1633}
Steven Rostedt94487d62009-05-05 19:22:53 -04001634EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001635
Steven Rostedte77405a2009-09-02 14:17:06 -04001636void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1637 struct ring_buffer_event *event,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001638 unsigned long flags, int pc)
1639{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001640 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001641}
Steven Rostedt94487d62009-05-05 19:22:53 -04001642EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001643
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001644void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1645 struct ring_buffer_event *event,
1646 unsigned long flags, int pc,
1647 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001648{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001649 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001650
1651 ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1652 ftrace_trace_userstack(buffer, flags, pc);
1653}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001654EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001655
Steven Rostedte77405a2009-09-02 14:17:06 -04001656void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1657 struct ring_buffer_event *event)
Steven Rostedt77d9f462009-04-02 01:16:59 -04001658{
Steven Rostedte77405a2009-09-02 14:17:06 -04001659 ring_buffer_discard_commit(buffer, event);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001660}
Steven Rostedt12acd472009-04-17 16:01:56 -04001661EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001662
Ingo Molnare309b412008-05-12 21:20:51 +02001663void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001664trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04001665 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1666 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001667{
Tom Zanussie1112b42009-03-31 00:48:49 -05001668 struct ftrace_event_call *call = &event_function;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001669 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001670 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001671 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001672
Steven Rostedtd7690412008-10-01 00:29:53 -04001673 /* If we are reading the ring buffer, don't trace */
Rusty Russelldd17c8f2009-10-29 22:34:15 +09001674 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
Steven Rostedtd7690412008-10-01 00:29:53 -04001675 return;
1676
Steven Rostedte77405a2009-09-02 14:17:06 -04001677 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001678 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001679 if (!event)
1680 return;
1681 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04001682 entry->ip = ip;
1683 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05001684
Tom Zanussif306cc82013-10-24 08:34:17 -05001685 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001686 __buffer_unlock_commit(buffer, event);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001687}
1688
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001689#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001690
1691#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1692struct ftrace_stack {
1693 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1694};
1695
1696static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1697static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1698
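/*
 * ftrace_stack_reserve is a per-cpu nesting counter: the outermost
 * stack trace on a cpu gets the large per-cpu ftrace_stack, while a
 * nested interrupt/NMI trace falls back to the event's own, smaller
 * buffer (see __ftrace_trace_stack() below).
 */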
Steven Rostedte77405a2009-09-02 14:17:06 -04001699static void __ftrace_trace_stack(struct ring_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05001700 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001701 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02001702{
Tom Zanussie1112b42009-03-31 00:48:49 -05001703 struct ftrace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001704 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001705 struct stack_entry *entry;
Ingo Molnar86387f72008-05-12 21:20:51 +02001706 struct stack_trace trace;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001707 int use_stack;
1708 int size = FTRACE_STACK_ENTRIES;
Ingo Molnar86387f72008-05-12 21:20:51 +02001709
1710 trace.nr_entries = 0;
Ingo Molnar86387f72008-05-12 21:20:51 +02001711 trace.skip = skip;
Ingo Molnar86387f72008-05-12 21:20:51 +02001712
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001713 /*
1714 * Since events can happen in NMIs, there's no safe way to
1715 * use the per-cpu ftrace_stacks. We reserve one and if an interrupt
1716 * or NMI comes in, it will just have to use the default
1717 * FTRACE_STACK_SIZE.
1718 */
1719 preempt_disable_notrace();
1720
Shan Wei82146522012-11-19 13:21:01 +08001721 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001722 /*
1723 * We don't need any atomic variables, just a barrier.
1724 * If an interrupt comes in, we don't care, because it would
1725 * have exited and put the counter back to what we want.
1726 * We just need a barrier to keep gcc from moving things
1727 * around.
1728 */
1729 barrier();
1730 if (use_stack == 1) {
1731 trace.entries = &__get_cpu_var(ftrace_stack).calls[0];
1732 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1733
1734 if (regs)
1735 save_stack_trace_regs(regs, &trace);
1736 else
1737 save_stack_trace(&trace);
1738
1739 if (trace.nr_entries > size)
1740 size = trace.nr_entries;
1741 } else
1742 /* From now on, use_stack is a boolean */
1743 use_stack = 0;
1744
1745 size *= sizeof(unsigned long);
1746
1747 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1748 sizeof(*entry) + size, flags, pc);
1749 if (!event)
1750 goto out;
1751 entry = ring_buffer_event_data(event);
1752
1753 memset(&entry->caller, 0, size);
1754
1755 if (use_stack)
1756 memcpy(&entry->caller, trace.entries,
1757 trace.nr_entries * sizeof(unsigned long));
1758 else {
1759 trace.max_entries = FTRACE_STACK_ENTRIES;
1760 trace.entries = entry->caller;
1761 if (regs)
1762 save_stack_trace_regs(regs, &trace);
1763 else
1764 save_stack_trace(&trace);
1765 }
1766
1767 entry->size = trace.nr_entries;
1768
Tom Zanussif306cc82013-10-24 08:34:17 -05001769 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001770 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001771
1772 out:
1773 /* Again, don't let gcc optimize things here */
1774 barrier();
Shan Wei82146522012-11-19 13:21:01 +08001775 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001776 preempt_enable_notrace();
1777
Ingo Molnarf0a920d2008-05-12 21:20:47 +02001778}
1779
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001780void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1781 int skip, int pc, struct pt_regs *regs)
1782{
1783 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1784 return;
1785
1786 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1787}
1788
Steven Rostedte77405a2009-09-02 14:17:06 -04001789void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1790 int skip, int pc)
Steven Rostedt53614992009-01-15 19:12:40 -05001791{
1792 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1793 return;
1794
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001795 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
Steven Rostedt53614992009-01-15 19:12:40 -05001796}
1797
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001798void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1799 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04001800{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001801 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
Steven Rostedt38697052008-10-01 13:14:09 -04001802}
1803
Steven Rostedt03889382009-12-11 09:48:22 -05001804/**
1805 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001806 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05001807 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001808void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05001809{
1810 unsigned long flags;
1811
1812 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05001813 return;
Steven Rostedt03889382009-12-11 09:48:22 -05001814
1815 local_save_flags(flags);
1816
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001817 /*
1818 * Skip 3 more frames; that seems to get us to the caller
1819 * of this function.
1820 */
1821 skip += 3;
1822 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1823 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05001824}
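/*
 * Example use (hypothetical call site): dropping trace_dump_stack(0)
 * into a suspect code path records the current kernel stack in the
 * trace buffer without stopping the tracer.
 */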
1825
Steven Rostedt91e86e52010-11-10 12:56:12 +01001826static DEFINE_PER_CPU(int, user_stack_count);
1827
Steven Rostedte77405a2009-09-02 14:17:06 -04001828void
1829ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02001830{
Tom Zanussie1112b42009-03-31 00:48:49 -05001831 struct ftrace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02001832 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02001833 struct userstack_entry *entry;
1834 struct stack_trace trace;
Török Edwin02b67512008-11-22 13:28:47 +02001835
1836 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1837 return;
1838
Steven Rostedtb6345872010-03-12 20:03:30 -05001839 /*
1840 * NMIs cannot handle page faults, even with fixups.
1841 * Saving the user stack can (and often does) fault.
1842 */
1843 if (unlikely(in_nmi()))
1844 return;
1845
Steven Rostedt91e86e52010-11-10 12:56:12 +01001846 /*
1847 * prevent recursion, since the user stack tracing may
1848 * trigger other kernel events.
1849 */
1850 preempt_disable();
1851 if (__this_cpu_read(user_stack_count))
1852 goto out;
1853
1854 __this_cpu_inc(user_stack_count);
1855
Steven Rostedte77405a2009-09-02 14:17:06 -04001856 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001857 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02001858 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08001859 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02001860 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02001861
Steven Rostedt48659d32009-09-11 11:36:23 -04001862 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02001863 memset(&entry->caller, 0, sizeof(entry->caller));
1864
1865 trace.nr_entries = 0;
1866 trace.max_entries = FTRACE_STACK_ENTRIES;
1867 trace.skip = 0;
1868 trace.entries = entry->caller;
1869
1870 save_stack_trace_user(&trace);
Tom Zanussif306cc82013-10-24 08:34:17 -05001871 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001872 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001873
Li Zefan1dbd1952010-12-09 15:47:56 +08001874 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01001875 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001876 out:
1877 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02001878}
1879
Hannes Eder4fd27352009-02-10 19:44:12 +01001880#ifdef UNUSED
1881static void __trace_userstack(struct trace_array *tr, unsigned long flags)
Török Edwin02b67512008-11-22 13:28:47 +02001882{
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001883 ftrace_trace_userstack(tr, flags, preempt_count());
Török Edwin02b67512008-11-22 13:28:47 +02001884}
Hannes Eder4fd27352009-02-10 19:44:12 +01001885#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02001886
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001887#endif /* CONFIG_STACKTRACE */
1888
Steven Rostedt07d777f2011-09-22 14:01:55 -04001889/* created for use with alloc_percpu */
1890struct trace_buffer_struct {
1891 char buffer[TRACE_BUF_SIZE];
1892};
1893
1894static struct trace_buffer_struct *trace_percpu_buffer;
1895static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1896static struct trace_buffer_struct *trace_percpu_irq_buffer;
1897static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1898
1899/*
1900 * The buffer used is dependent on the context. There is a per cpu
1901 * buffer for normal context, softirq context, hard irq context and
1902 * for NMI context. This allows for lockless recording.
1903 *
1904 * Note, if the buffers failed to be allocated, then this returns NULL
1905 */
1906static char *get_trace_buf(void)
1907{
1908 struct trace_buffer_struct *percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04001909
1910 /*
1911 * If we have allocated per cpu buffers, then we do not
1912 * need to do any locking.
1913 */
1914 if (in_nmi())
1915 percpu_buffer = trace_percpu_nmi_buffer;
1916 else if (in_irq())
1917 percpu_buffer = trace_percpu_irq_buffer;
1918 else if (in_softirq())
1919 percpu_buffer = trace_percpu_sirq_buffer;
1920 else
1921 percpu_buffer = trace_percpu_buffer;
1922
1923 if (!percpu_buffer)
1924 return NULL;
1925
Shan Weid8a03492012-11-13 09:53:04 +08001926 return this_cpu_ptr(&percpu_buffer->buffer[0]);
Steven Rostedt07d777f2011-09-22 14:01:55 -04001927}
1928
1929static int alloc_percpu_trace_buffer(void)
1930{
1931 struct trace_buffer_struct *buffers;
1932 struct trace_buffer_struct *sirq_buffers;
1933 struct trace_buffer_struct *irq_buffers;
1934 struct trace_buffer_struct *nmi_buffers;
1935
1936 buffers = alloc_percpu(struct trace_buffer_struct);
1937 if (!buffers)
1938 goto err_warn;
1939
1940 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1941 if (!sirq_buffers)
1942 goto err_sirq;
1943
1944 irq_buffers = alloc_percpu(struct trace_buffer_struct);
1945 if (!irq_buffers)
1946 goto err_irq;
1947
1948 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
1949 if (!nmi_buffers)
1950 goto err_nmi;
1951
1952 trace_percpu_buffer = buffers;
1953 trace_percpu_sirq_buffer = sirq_buffers;
1954 trace_percpu_irq_buffer = irq_buffers;
1955 trace_percpu_nmi_buffer = nmi_buffers;
1956
1957 return 0;
1958
1959 err_nmi:
1960 free_percpu(irq_buffers);
1961 err_irq:
1962 free_percpu(sirq_buffers);
1963 err_sirq:
1964 free_percpu(buffers);
1965 err_warn:
1966 WARN(1, "Could not allocate percpu trace_printk buffer");
1967 return -ENOMEM;
1968}
1969
Steven Rostedt81698832012-10-11 10:15:05 -04001970static int buffers_allocated;
1971
Steven Rostedt07d777f2011-09-22 14:01:55 -04001972void trace_printk_init_buffers(void)
1973{
Steven Rostedt07d777f2011-09-22 14:01:55 -04001974 if (buffers_allocated)
1975 return;
1976
1977 if (alloc_percpu_trace_buffer())
1978 return;
1979
1980 pr_info("ftrace: Allocated trace_printk buffers\n");
1981
Steven Rostedtb382ede62012-10-10 21:44:34 -04001982 /* Expand the buffers to set size */
1983 tracing_update_buffers();
1984
Steven Rostedt07d777f2011-09-22 14:01:55 -04001985 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04001986
1987 /*
1988 * trace_printk_init_buffers() can be called by modules.
1989 * If that happens, then we need to start cmdline recording
1990 * directly here. If the global_trace.buffer is already
1991 * allocated here, then this was called by module code.
1992 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001993 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04001994 tracing_start_cmdline_record();
1995}
1996
1997void trace_printk_start_comm(void)
1998{
1999 /* Start tracing comms if trace printk is set */
2000 if (!buffers_allocated)
2001 return;
2002 tracing_start_cmdline_record();
2003}
2004
2005static void trace_printk_start_stop_comm(int enabled)
2006{
2007 if (!buffers_allocated)
2008 return;
2009
2010 if (enabled)
2011 tracing_start_cmdline_record();
2012 else
2013 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002014}
2015
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002016/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002017 * trace_vbprintk - write a binary message to the tracing buffer
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002018 *
2019 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04002020int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002021{
Tom Zanussie1112b42009-03-31 00:48:49 -05002022 struct ftrace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002023 struct ring_buffer_event *event;
Steven Rostedte77405a2009-09-02 14:17:06 -04002024 struct ring_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002025 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002026 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002027 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002028 char *tbuffer;
2029 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002030
2031 if (unlikely(tracing_selftest_running || tracing_disabled))
2032 return 0;
2033
2034 /* Don't pollute graph traces with trace_vprintk internals */
2035 pause_graph_tracing();
2036
2037 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04002038 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002039
Steven Rostedt07d777f2011-09-22 14:01:55 -04002040 tbuffer = get_trace_buf();
2041 if (!tbuffer) {
2042 len = 0;
2043 goto out;
2044 }
2045
2046 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2047
2048 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002049 goto out;
2050
Steven Rostedt07d777f2011-09-22 14:01:55 -04002051 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002052 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002053 buffer = tr->trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04002054 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2055 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002056 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002057 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002058 entry = ring_buffer_event_data(event);
2059 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002060 entry->fmt = fmt;
2061
Steven Rostedt07d777f2011-09-22 14:01:55 -04002062 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05002063 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002064 __buffer_unlock_commit(buffer, event);
Steven Rostedtd9313692010-01-06 17:27:11 -05002065 ftrace_trace_stack(buffer, flags, 6, pc);
2066 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002067
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002068out:
Steven Rostedt5168ae52010-06-03 09:36:50 -04002069 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002070 unpause_graph_tracing();
2071
2072 return len;
2073}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002074EXPORT_SYMBOL_GPL(trace_vbprintk);
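/*
 * Note: trace_vbprintk() is normally reached via the trace_printk()
 * macro when the format is a string literal. The arguments are stored
 * in binary form by vbin_printf() and only formatted when the buffer
 * is read, which keeps the write side cheap.
 */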
2075
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002076static int
2077__trace_array_vprintk(struct ring_buffer *buffer,
2078 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002079{
Tom Zanussie1112b42009-03-31 00:48:49 -05002080 struct ftrace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002081 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002082 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002083 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002084 unsigned long flags;
2085 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002086
2087 if (tracing_disabled || tracing_selftest_running)
2088 return 0;
2089
Steven Rostedt07d777f2011-09-22 14:01:55 -04002090 /* Don't pollute graph traces with trace_vprintk internals */
2091 pause_graph_tracing();
2092
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002093 pc = preempt_count();
2094 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002095
Steven Rostedt07d777f2011-09-22 14:01:55 -04002096
2097 tbuffer = get_trace_buf();
2098 if (!tbuffer) {
2099 len = 0;
2100 goto out;
2101 }
2102
2103 len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2104 if (len > TRACE_BUF_SIZE)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002105 goto out;
2106
Steven Rostedt07d777f2011-09-22 14:01:55 -04002107 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002108 size = sizeof(*entry) + len + 1;
Steven Rostedte77405a2009-09-02 14:17:06 -04002109 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
Steven Rostedt07d777f2011-09-22 14:01:55 -04002110 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002111 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002112 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002113 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002114 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002115
Steven Rostedt07d777f2011-09-22 14:01:55 -04002116 memcpy(&entry->buf, tbuffer, len);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002117 entry->buf[len] = '\0';
Tom Zanussif306cc82013-10-24 08:34:17 -05002118 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002119 __buffer_unlock_commit(buffer, event);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002120 ftrace_trace_stack(buffer, flags, 6, pc);
Steven Rostedtd9313692010-01-06 17:27:11 -05002121 }
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002122 out:
2123 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002124 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002125
2126 return len;
2127}
Steven Rostedt659372d2009-09-03 19:11:07 -04002128
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002129int trace_array_vprintk(struct trace_array *tr,
2130 unsigned long ip, const char *fmt, va_list args)
2131{
2132 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2133}
2134
2135int trace_array_printk(struct trace_array *tr,
2136 unsigned long ip, const char *fmt, ...)
2137{
2138 int ret;
2139 va_list ap;
2140
2141 if (!(trace_flags & TRACE_ITER_PRINTK))
2142 return 0;
2143
2144 va_start(ap, fmt);
2145 ret = trace_array_vprintk(tr, ip, fmt, ap);
2146 va_end(ap);
2147 return ret;
2148}
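/*
 * Example (hypothetical instance): code holding a trace_array from a
 * tracefs instance can log into just that instance's buffer with
 *
 *	trace_array_printk(tr, _THIS_IP_, "reset took %llu ns\n", delta);
 *
 * provided the "printk" trace option is set.
 */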
2149
2150int trace_array_printk_buf(struct ring_buffer *buffer,
2151 unsigned long ip, const char *fmt, ...)
2152{
2153 int ret;
2154 va_list ap;
2155
2156 if (!(trace_flags & TRACE_ITER_PRINTK))
2157 return 0;
2158
2159 va_start(ap, fmt);
2160 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2161 va_end(ap);
2162 return ret;
2163}
2164
Steven Rostedt659372d2009-09-03 19:11:07 -04002165int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2166{
Steven Rostedta813a152009-10-09 01:41:35 -04002167 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04002168}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002169EXPORT_SYMBOL_GPL(trace_vprintk);
2170
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002171static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04002172{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002173 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2174
Steven Rostedt5a90f572008-09-03 17:42:51 -04002175 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002176 if (buf_iter)
2177 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04002178}
2179
Ingo Molnare309b412008-05-12 21:20:51 +02002180static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002181peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2182 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002183{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002184 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002185 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002186
Steven Rostedtd7690412008-10-01 00:29:53 -04002187 if (buf_iter)
2188 event = ring_buffer_iter_peek(buf_iter, ts);
2189 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002190 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002191 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04002192
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002193 if (event) {
2194 iter->ent_size = ring_buffer_event_length(event);
2195 return ring_buffer_event_data(event);
2196 }
2197 iter->ent_size = 0;
2198 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002199}
Steven Rostedtd7690412008-10-01 00:29:53 -04002200
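/*
 * __find_next_entry() is a k-way merge across the per-cpu buffers: it
 * peeks at each non-empty cpu and returns the event with the smallest
 * timestamp, recording its cpu, lost-event count and entry size.
 */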
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002201static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002202__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2203 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002204{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002205 struct ring_buffer *buffer = iter->trace_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002206 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08002207 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002208 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002209 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002210 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002211 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002212 int cpu;
2213
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002214 /*
2215 * If we are in a per_cpu trace file, don't bother iterating over
2216 * all the cpus; peek at that one cpu directly.
2217 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002218 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002219 if (ring_buffer_empty_cpu(buffer, cpu_file))
2220 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002221 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002222 if (ent_cpu)
2223 *ent_cpu = cpu_file;
2224
2225 return ent;
2226 }
2227
Steven Rostedtab464282008-05-12 21:21:00 +02002228 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002229
2230 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002231 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002232
Steven Rostedtbc21b472010-03-31 19:49:26 -04002233 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002234
Ingo Molnarcdd31cd2008-05-12 21:20:46 +02002235 /*
2236 * Pick the entry with the smallest timestamp:
2237 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002238 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002239 next = ent;
2240 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002241 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002242 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002243 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002244 }
2245 }
2246
Steven Rostedt12b5da32012-03-27 10:43:28 -04002247 iter->ent_size = next_size;
2248
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002249 if (ent_cpu)
2250 *ent_cpu = next_cpu;
2251
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002252 if (ent_ts)
2253 *ent_ts = next_ts;
2254
Steven Rostedtbc21b472010-03-31 19:49:26 -04002255 if (missing_events)
2256 *missing_events = next_lost;
2257
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002258 return next;
2259}
2260
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002261/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002262struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2263 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002264{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002265 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002266}
Ingo Molnar8c523a92008-05-12 21:20:46 +02002267
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002268/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05002269void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002270{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002271 iter->ent = __find_next_entry(iter, &iter->cpu,
2272 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02002273
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002274 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002275 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002276
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002277 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02002278}
2279
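/*
 * Two read models meet here: the seq_file iterator path above peeks and
 * leaves events in the buffer, while trace_consume() (the trace_pipe
 * path) removes each event as it is read.
 */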
Ingo Molnare309b412008-05-12 21:20:51 +02002280static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002281{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002282 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002283 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002284}
2285
Ingo Molnare309b412008-05-12 21:20:51 +02002286static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002287{
2288 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002289 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002290 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002291
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002292 WARN_ON_ONCE(iter->leftover);
2293
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002294 (*pos)++;
2295
2296 /* can't go backwards */
2297 if (iter->idx > i)
2298 return NULL;
2299
2300 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05002301 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002302 else
2303 ent = iter;
2304
2305 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05002306 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002307
2308 iter->pos = *pos;
2309
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002310 return ent;
2311}
2312
Jason Wessel955b61e2010-08-05 09:22:23 -05002313void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002314{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002315 struct ring_buffer_event *event;
2316 struct ring_buffer_iter *buf_iter;
2317 unsigned long entries = 0;
2318 u64 ts;
2319
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002320 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002321
Steven Rostedt6d158a82012-06-27 20:46:14 -04002322 buf_iter = trace_buffer_iter(iter, cpu);
2323 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002324 return;
2325
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002326 ring_buffer_iter_reset(buf_iter);
2327
2328 /*
2329 * With the max latency tracers, we could have the case
2330 * that a reset never took place on a cpu. This is evident
2331 * when a timestamp is before the start of the buffer.
2332 */
2333 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002334 if (ts >= iter->trace_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002335 break;
2336 entries++;
2337 ring_buffer_read(buf_iter, NULL);
2338 }
2339
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002340 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002341}
2342
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002343/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002344 * The current tracer is copied to avoid taking a global lock
2345 * all around.
2346 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002347static void *s_start(struct seq_file *m, loff_t *pos)
2348{
2349 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002350 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002351 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002352 void *p = NULL;
2353 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002354 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002355
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09002356 /*
2357 * Copy the tracer to avoid using a global lock all around.
2358 * iter->trace is a copy of current_trace; the name pointer may
2359 * be compared instead of using strcmp(), as iter->trace->name
2360 * will point to the same string as current_trace->name.
2361 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002362 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002363 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2364 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002365 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002366
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002367#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002368 if (iter->snapshot && iter->trace->use_max_tr)
2369 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002370#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002371
2372 if (!iter->snapshot)
2373 atomic_inc(&trace_record_cmdline_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002374
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002375 if (*pos != iter->pos) {
2376 iter->ent = NULL;
2377 iter->cpu = 0;
2378 iter->idx = -1;
2379
Steven Rostedtae3b5092013-01-23 15:22:59 -05002380 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002381 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002382 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002383 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002384 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002385
Lai Jiangshanac91d852010-03-02 17:54:50 +08002386 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002387 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2388 ;
2389
2390 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002391 /*
2392 * If we overflowed the seq_file before, then we want
2393 * to just reuse the trace_seq buffer again.
2394 */
2395 if (iter->leftover)
2396 p = iter;
2397 else {
2398 l = *pos - 1;
2399 p = s_next(m, p, &l);
2400 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002401 }
2402
Lai Jiangshan4f535962009-05-18 19:35:34 +08002403 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002404 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002405 return p;
2406}
2407
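/*
 * s_stop() undoes s_start(): it drops trace_access_lock() and
 * trace_event_read_lock() taken there and re-enables cmdline recording
 * for non-snapshot iterators.
 */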
2408static void s_stop(struct seq_file *m, void *p)
2409{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002410 struct trace_iterator *iter = m->private;
2411
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002412#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002413 if (iter->snapshot && iter->trace->use_max_tr)
2414 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002415#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002416
2417 if (!iter->snapshot)
2418 atomic_dec(&trace_record_cmdline_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002419
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002420 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08002421 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002422}
2423
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002424static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002425get_total_entries(struct trace_buffer *buf,
2426 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002427{
2428 unsigned long count;
2429 int cpu;
2430
2431 *total = 0;
2432 *entries = 0;
2433
2434 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002435 count = ring_buffer_entries_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002436 /*
2437 * If this buffer has skipped entries, then we hold all
2438 * entries for the trace and we need to ignore the
2439 * ones before the time stamp.
2440 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002441 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2442 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002443 /* total is the same as the entries */
2444 *total += count;
2445 } else
2446 *total += count +
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002447 ring_buffer_overrun_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002448 *entries += count;
2449 }
2450}
2451
Ingo Molnare309b412008-05-12 21:20:51 +02002452static void print_lat_help_header(struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002453{
Michael Ellermana6168352008-08-20 16:36:11 -07002454 seq_puts(m, "# _------=> CPU# \n");
2455 seq_puts(m, "# / _-----=> irqs-off \n");
2456 seq_puts(m, "# | / _----=> need-resched \n");
2457 seq_puts(m, "# || / _---=> hardirq/softirq \n");
2458 seq_puts(m, "# ||| / _--=> preempt-depth \n");
Steven Rostedte6e1e252011-03-09 10:41:56 -05002459 seq_puts(m, "# |||| / delay \n");
2460 seq_puts(m, "# cmd pid ||||| time | caller \n");
2461 seq_puts(m, "# \\ / ||||| \\ | / \n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002462}
2463
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002464static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002465{
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002466 unsigned long total;
2467 unsigned long entries;
2468
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002469 get_total_entries(buf, &total, &entries);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002470 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2471 entries, total, num_online_cpus());
2472 seq_puts(m, "#\n");
2473}
2474
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002475static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002476{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002477 print_event_info(buf, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002478 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
Michael Ellermana6168352008-08-20 16:36:11 -07002479 seq_puts(m, "# | | | | |\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002480}
2481
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002482static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt77271ce2011-11-17 09:34:33 -05002483{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002484 print_event_info(buf, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002485 seq_puts(m, "# _-----=> irqs-off\n");
2486 seq_puts(m, "# / _----=> need-resched\n");
2487 seq_puts(m, "# | / _---=> hardirq/softirq\n");
2488 seq_puts(m, "# || / _--=> preempt-depth\n");
2489 seq_puts(m, "# ||| / delay\n");
2490 seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n");
2491 seq_puts(m, "# | | | |||| | |\n");
2492}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002493
Jiri Olsa62b915f2010-04-02 19:01:22 +02002494void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002495print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2496{
2497 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002498 struct trace_buffer *buf = iter->trace_buffer;
2499 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002500 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002501 unsigned long entries;
2502 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002503 const char *name = "preemption";
2504
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05002505 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002506
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002507 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002508
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002509 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002510 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002511 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002512 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002513 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002514 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02002515 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002516 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02002517 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002518 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002519#if defined(CONFIG_PREEMPT_NONE)
2520 "server",
2521#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2522 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04002523#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002524 "preempt",
2525#else
2526 "unknown",
2527#endif
2528 /* These are reserved for later use */
2529 0, 0, 0, 0);
2530#ifdef CONFIG_SMP
2531 seq_printf(m, " #P:%d)\n", num_online_cpus());
2532#else
2533 seq_puts(m, ")\n");
2534#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002535 seq_puts(m, "# -----------------\n");
2536 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002537 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07002538 data->comm, data->pid,
2539 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002540 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002541 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002542
2543 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002544 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002545 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2546 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002547 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002548 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2549 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04002550 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002551 }
2552
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002553 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002554}
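/*
 * Putting it together, the verbose latency header looks roughly like
 * this sketch (all values made up):
 *
 *   # irqsoff latency trace v1.1.5 on 3.8.0
 *   # --------------------------------------------------------------------
 *   # latency: 259 us, #4/4, CPU#2 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:4)
 *   #    -----------------
 *   #    | task: ps-6143 (uid:0 nice:0 policy:0 rt_prio:0)
 *   #    -----------------
 *   #  => started at: __lock_task_sighand
 *   #  => ended at:   _raw_spin_unlock_irqrestore
 */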
2555
Steven Rostedta3097202008-11-07 22:36:02 -05002556static void test_cpu_buff_start(struct trace_iterator *iter)
2557{
2558 struct trace_seq *s = &iter->seq;
2559
Steven Rostedt12ef7d42008-11-12 17:52:38 -05002560 if (!(trace_flags & TRACE_ITER_ANNOTATE))
2561 return;
2562
2563 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2564 return;
2565
Rusty Russell44623442009-01-01 10:12:23 +10302566 if (cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05002567 return;
2568
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002569 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002570 return;
2571
Rusty Russell44623442009-01-01 10:12:23 +10302572 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02002573
2574 /* Don't print started cpu buffer for the first entry of the trace */
2575 if (iter->idx > 1)
2576 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2577 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05002578}
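/*
 * When annotations are active and a CPU's buffer overran, the first
 * entry shown from that CPU (except at the very start of the trace) is
 * preceded by a marker of the form (cpu number is illustrative):
 *
 *   ##### CPU 2 buffer started ####
 */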
2579
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002580static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002581{
Steven Rostedt214023c2008-05-12 21:20:46 +02002582 struct trace_seq *s = &iter->seq;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002583 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002584 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002585 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002586
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002587 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002588
Steven Rostedta3097202008-11-07 22:36:02 -05002589 test_cpu_buff_start(iter);
2590
Steven Rostedtf633cef2008-12-23 23:24:13 -05002591 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002592
2593 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt27d48be2009-03-04 21:57:29 -05002594 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2595 if (!trace_print_lat_context(iter))
2596 goto partial;
2597 } else {
2598 if (!trace_print_context(iter))
2599 goto partial;
2600 }
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002601 }
2602
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002603 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04002604 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002605
2606 if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
2607 goto partial;
Steven Rostedt7104f302008-10-01 10:52:51 -04002608
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002609 return TRACE_TYPE_HANDLED;
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002610partial:
2611 return TRACE_TYPE_PARTIAL_LINE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002612}
2613
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002614static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002615{
2616 struct trace_seq *s = &iter->seq;
2617 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002618 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002619
2620 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002621
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002622 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002623 if (!trace_seq_printf(s, "%d %d %llu ",
2624 entry->pid, iter->cpu, iter->ts))
2625 goto partial;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002626 }
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002627
Steven Rostedtf633cef2008-12-23 23:24:13 -05002628 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002629 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04002630 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002631
2632 if (!trace_seq_printf(s, "%d ?\n", entry->type))
2633 goto partial;
Steven Rostedt7104f302008-10-01 10:52:51 -04002634
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002635 return TRACE_TYPE_HANDLED;
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002636partial:
2637 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002638}
2639
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002640static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002641{
2642 struct trace_seq *s = &iter->seq;
2643 unsigned char newline = '\n';
2644 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002645 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002646
2647 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002648
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002649 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2650 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
2651 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
2652 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
2653 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002654
Steven Rostedtf633cef2008-12-23 23:24:13 -05002655 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002656 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04002657 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002658 if (ret != TRACE_TYPE_HANDLED)
2659 return ret;
2660 }
Steven Rostedt7104f302008-10-01 10:52:51 -04002661
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002662 SEQ_PUT_FIELD_RET(s, newline);
2663
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002664 return TRACE_TYPE_HANDLED;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002665}
2666
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002667static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002668{
2669 struct trace_seq *s = &iter->seq;
2670 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002671 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002672
2673 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002674
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002675 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2676 SEQ_PUT_FIELD_RET(s, entry->pid);
Steven Rostedt1830b52d2009-02-07 19:38:43 -05002677 SEQ_PUT_FIELD_RET(s, iter->cpu);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002678 SEQ_PUT_FIELD_RET(s, iter->ts);
2679 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002680
Steven Rostedtf633cef2008-12-23 23:24:13 -05002681 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04002682 return event ? event->funcs->binary(iter, 0, event) :
2683 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002684}
2685
Jiri Olsa62b915f2010-04-02 19:01:22 +02002686int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002687{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002688 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002689 int cpu;
2690
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002691 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002692 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002693 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002694 buf_iter = trace_buffer_iter(iter, cpu);
2695 if (buf_iter) {
2696 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002697 return 0;
2698 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002699 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002700 return 0;
2701 }
2702 return 1;
2703 }
2704
Steven Rostedtab464282008-05-12 21:21:00 +02002705 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04002706 buf_iter = trace_buffer_iter(iter, cpu);
2707 if (buf_iter) {
2708 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04002709 return 0;
2710 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002711 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04002712 return 0;
2713 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002714 }
Steven Rostedtd7690412008-10-01 00:29:53 -04002715
Frederic Weisbecker797d3712008-09-30 18:13:45 +02002716 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002717}
2718
Lai Jiangshan4f535962009-05-18 19:35:34 +08002719/* Called with trace_event_read_lock() held. */
Jason Wessel955b61e2010-08-05 09:22:23 -05002720enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002721{
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002722 enum print_line_t ret;
2723
Jiri Olsaee5e51f2011-03-25 12:05:18 +01002724 if (iter->lost_events &&
2725 !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2726 iter->cpu, iter->lost_events))
2727 return TRACE_TYPE_PARTIAL_LINE;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002728
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002729 if (iter->trace && iter->trace->print_line) {
2730 ret = iter->trace->print_line(iter);
2731 if (ret != TRACE_TYPE_UNHANDLED)
2732 return ret;
2733 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02002734
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05002735 if (iter->ent->type == TRACE_BPUTS &&
2736 trace_flags & TRACE_ITER_PRINTK &&
2737 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2738 return trace_print_bputs_msg_only(iter);
2739
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002740 if (iter->ent->type == TRACE_BPRINT &&
2741 trace_flags & TRACE_ITER_PRINTK &&
2742 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04002743 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002744
Frederic Weisbecker66896a82008-12-13 20:18:13 +01002745 if (iter->ent->type == TRACE_PRINT &&
2746 trace_flags & TRACE_ITER_PRINTK &&
2747 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04002748 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01002749
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002750 if (trace_flags & TRACE_ITER_BIN)
2751 return print_bin_fmt(iter);
2752
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002753 if (trace_flags & TRACE_ITER_HEX)
2754 return print_hex_fmt(iter);
2755
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002756 if (trace_flags & TRACE_ITER_RAW)
2757 return print_raw_fmt(iter);
2758
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002759 return print_trace_fmt(iter);
2760}
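/*
 * Note the precedence implemented above: lost-event annotations are
 * emitted first, then a tracer-specific print_line() callback gets a
 * chance, then the printk-msg-only shortcuts, and finally the
 * bin/hex/raw/default formats selected through trace_options.
 */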
2761
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01002762void trace_latency_header(struct seq_file *m)
2763{
2764 struct trace_iterator *iter = m->private;
2765
2766 /* print nothing if the buffers are empty */
2767 if (trace_empty(iter))
2768 return;
2769
2770 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2771 print_trace_header(m, iter);
2772
2773 if (!(trace_flags & TRACE_ITER_VERBOSE))
2774 print_lat_help_header(m);
2775}
2776
Jiri Olsa62b915f2010-04-02 19:01:22 +02002777void trace_default_header(struct seq_file *m)
2778{
2779 struct trace_iterator *iter = m->private;
2780
Jiri Olsaf56e7f82011-06-03 16:58:49 +02002781 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2782 return;
2783
Jiri Olsa62b915f2010-04-02 19:01:22 +02002784 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2785 /* print nothing if the buffers are empty */
2786 if (trace_empty(iter))
2787 return;
2788 print_trace_header(m, iter);
2789 if (!(trace_flags & TRACE_ITER_VERBOSE))
2790 print_lat_help_header(m);
2791 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05002792 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2793 if (trace_flags & TRACE_ITER_IRQ_INFO)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002794 print_func_help_header_irq(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002795 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002796 print_func_help_header(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002797 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02002798 }
2799}
2800
Steven Rostedte0a413f2011-09-29 21:26:16 -04002801static void test_ftrace_alive(struct seq_file *m)
2802{
2803 if (!ftrace_is_dead())
2804 return;
2805 seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
2806 seq_printf(m, "# MAY BE MISSING FUNCTION EVENTS\n");
2807}
2808
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002809#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002810static void show_snapshot_main_help(struct seq_file *m)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002811{
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002812 seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
2813 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2814 seq_printf(m, "# Takes a snapshot of the main buffer.\n");
Wang YanQingb9be6d02013-09-14 12:59:16 +08002815 seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002816 seq_printf(m, "# (Doesn't have to be '2' works with any number that\n");
2817 seq_printf(m, "# is not a '0' or '1')\n");
2818}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002819
2820static void show_snapshot_percpu_help(struct seq_file *m)
2821{
2822 seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2823#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2824 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2825 seq_printf(m, "# Takes a snapshot of the main buffer for this cpu.\n");
2826#else
2827 seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
2828 seq_printf(m, "# Must use main snapshot file to allocate.\n");
2829#endif
2830 seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
2831 seq_printf(m, "# (Doesn't have to be '2' works with any number that\n");
2832 seq_printf(m, "# is not a '0' or '1')\n");
2833}
2834
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002835static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2836{
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05002837 if (iter->tr->allocated_snapshot)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002838 seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
2839 else
2840 seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
2841
2842 seq_printf(m, "# Snapshot commands:\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002843 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2844 show_snapshot_main_help(m);
2845 else
2846 show_snapshot_percpu_help(m);
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002847}
2848#else
2849/* Should never be called */
2850static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2851#endif
2852
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002853static int s_show(struct seq_file *m, void *v)
2854{
2855 struct trace_iterator *iter = v;
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002856 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002857
2858 if (iter->ent == NULL) {
2859 if (iter->tr) {
2860 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2861 seq_puts(m, "#\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04002862 test_ftrace_alive(m);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002863 }
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002864 if (iter->snapshot && trace_empty(iter))
2865 print_snapshot_help(m, iter);
2866 else if (iter->trace && iter->trace->print_header)
Markus Metzger8bba1bf2008-11-25 09:12:31 +01002867 iter->trace->print_header(m);
Jiri Olsa62b915f2010-04-02 19:01:22 +02002868 else
2869 trace_default_header(m);
2870
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002871 } else if (iter->leftover) {
2872 /*
2873 * If we filled the seq_file buffer earlier, we
2874 * want to just show it now.
2875 */
2876 ret = trace_print_seq(m, &iter->seq);
2877
2878 /* ret should this time be zero, but you never know */
2879 iter->leftover = ret;
2880
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002881 } else {
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002882 print_trace_line(iter);
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002883 ret = trace_print_seq(m, &iter->seq);
2884 /*
2885 * If we overflow the seq_file buffer, then it will
2886 * ask us for this data again at start up.
2887 * Use that instead.
2888 * ret is 0 if seq_file write succeeded.
2889 * -1 otherwise.
2890 */
2891 iter->leftover = ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002892 }
2893
2894 return 0;
2895}
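/*
 * So s_show() covers three cases: the first call (iter->ent == NULL)
 * prints the headers, a non-zero iter->leftover re-emits a line that
 * overflowed the seq_file buffer on the previous pass, and the normal
 * path formats the current entry.
 */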
2896
Oleg Nesterov649e9c72013-07-23 17:25:54 +02002897/*
2898 * Should be used after trace_array_get(), trace_types_lock
2899 * ensures that i_cdev was already initialized.
2900 */
2901static inline int tracing_get_cpu(struct inode *inode)
2902{
2903 if (inode->i_cdev) /* See trace_create_cpu_file() */
2904 return (long)inode->i_cdev - 1;
2905 return RING_BUFFER_ALL_CPUS;
2906}
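/*
 * Since trace_create_cpu_file() stores cpu + 1 in i_cdev, a NULL
 * i_cdev (any non per-cpu file) decodes to RING_BUFFER_ALL_CPUS, and
 * an i_cdev of (void *)1, for example, decodes to CPU 0.
 */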
2907
James Morris88e9d342009-09-22 16:43:43 -07002908static const struct seq_operations tracer_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02002909 .start = s_start,
2910 .next = s_next,
2911 .stop = s_stop,
2912 .show = s_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002913};
2914
Ingo Molnare309b412008-05-12 21:20:51 +02002915static struct trace_iterator *
Oleg Nesterov6484c712013-07-23 17:26:10 +02002916__tracing_open(struct inode *inode, struct file *file, bool snapshot)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002917{
Oleg Nesterov6484c712013-07-23 17:26:10 +02002918 struct trace_array *tr = inode->i_private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002919 struct trace_iterator *iter;
Jiri Olsa50e18b92012-04-25 10:23:39 +02002920 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002921
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002922 if (tracing_disabled)
2923 return ERR_PTR(-ENODEV);
Steven Rostedt60a11772008-05-12 21:20:44 +02002924
Jiri Olsa50e18b92012-04-25 10:23:39 +02002925 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002926 if (!iter)
2927 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002928
Steven Rostedt6d158a82012-06-27 20:46:14 -04002929 iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
2930 GFP_KERNEL);
Dan Carpenter93574fc2012-07-11 09:35:08 +03002931 if (!iter->buffer_iter)
2932 goto release;
2933
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002934 /*
2935 * We make a copy of the current tracer to avoid concurrent
2936 * changes on it while we are reading.
2937 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002938 mutex_lock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002939 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002940 if (!iter->trace)
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002941 goto fail;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002942
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002943 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002944
Li Zefan79f55992009-06-15 14:58:26 +08002945 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02002946 goto fail;
2947
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002948 iter->tr = tr;
2949
2950#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002951 /* Currently only the top directory has a snapshot */
2952 if (tr->current_trace->print_max || snapshot)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002953 iter->trace_buffer = &tr->max_buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002954 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002955#endif
2956 iter->trace_buffer = &tr->trace_buffer;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002957 iter->snapshot = snapshot;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002958 iter->pos = -1;
Oleg Nesterov6484c712013-07-23 17:26:10 +02002959 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002960 mutex_init(&iter->mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002961
Markus Metzger8bba1bf2008-11-25 09:12:31 +01002962 /* Notify the tracer early, before we stop tracing. */
2963 if (iter->trace && iter->trace->open)
Markus Metzgera93751c2008-12-11 13:53:26 +01002964 iter->trace->open(iter);
Markus Metzger8bba1bf2008-11-25 09:12:31 +01002965
Steven Rostedt12ef7d42008-11-12 17:52:38 -05002966 /* Annotate start of buffers if we had overruns */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002967 if (ring_buffer_overruns(iter->trace_buffer->buffer))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05002968 iter->iter_flags |= TRACE_FILE_ANNOTATE;
2969
David Sharp8be07092012-11-13 12:18:22 -08002970 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09002971 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08002972 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
2973
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002974 /* stop the trace while dumping if we are not opening "snapshot" */
2975 if (!iter->snapshot)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002976 tracing_stop_tr(tr);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002977
Steven Rostedtae3b5092013-01-23 15:22:59 -05002978 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002979 for_each_tracing_cpu(cpu) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002980 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002981 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07002982 }
2983 ring_buffer_read_prepare_sync();
2984 for_each_tracing_cpu(cpu) {
2985 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002986 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002987 }
2988 } else {
2989 cpu = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002990 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002991 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07002992 ring_buffer_read_prepare_sync();
2993 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002994 tracing_iter_reset(iter, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002995 }
2996
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002997 mutex_unlock(&trace_types_lock);
2998
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002999 return iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003000
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003001 fail:
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003002 mutex_unlock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003003 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003004 kfree(iter->buffer_iter);
Dan Carpenter93574fc2012-07-11 09:35:08 +03003005release:
Jiri Olsa50e18b92012-04-25 10:23:39 +02003006 seq_release_private(inode, file);
3007 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003008}
3009
3010int tracing_open_generic(struct inode *inode, struct file *filp)
3011{
Steven Rostedt60a11772008-05-12 21:20:44 +02003012 if (tracing_disabled)
3013 return -ENODEV;
3014
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003015 filp->private_data = inode->i_private;
3016 return 0;
3017}
3018
Geyslan G. Bem2e864212013-10-18 21:15:54 -03003019bool tracing_is_disabled(void)
3020{
3021 return tracing_disabled;
3022}
3023
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003024/*
3025 * Open and update trace_array ref count.
3026 * Must have the current trace_array passed to it.
3027 */
Steven Rostedt (Red Hat)dcc30222013-07-02 20:30:52 -04003028static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003029{
3030 struct trace_array *tr = inode->i_private;
3031
3032 if (tracing_disabled)
3033 return -ENODEV;
3034
3035 if (trace_array_get(tr) < 0)
3036 return -ENODEV;
3037
3038 filp->private_data = inode->i_private;
3039
3040 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003041}
3042
Hannes Eder4fd27352009-02-10 19:44:12 +01003043static int tracing_release(struct inode *inode, struct file *file)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003044{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003045 struct trace_array *tr = inode->i_private;
matt mooney907f2782010-09-27 19:04:53 -07003046 struct seq_file *m = file->private_data;
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003047 struct trace_iterator *iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003048 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003049
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003050 if (!(file->f_mode & FMODE_READ)) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003051 trace_array_put(tr);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003052 return 0;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003053 }
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003054
Oleg Nesterov6484c712013-07-23 17:26:10 +02003055 /* Writes do not use seq_file */
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003056 iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003057 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05003058
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003059 for_each_tracing_cpu(cpu) {
3060 if (iter->buffer_iter[cpu])
3061 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3062 }
3063
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003064 if (iter->trace && iter->trace->close)
3065 iter->trace->close(iter);
3066
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003067 if (!iter->snapshot)
3068 /* reenable tracing if it was previously enabled */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003069 tracing_start_tr(tr);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003070
3071 __trace_array_put(tr);
3072
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003073 mutex_unlock(&trace_types_lock);
3074
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003075 mutex_destroy(&iter->mutex);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003076 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003077 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003078 kfree(iter->buffer_iter);
Jiri Olsa50e18b92012-04-25 10:23:39 +02003079 seq_release_private(inode, file);
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003080
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003081 return 0;
3082}
3083
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003084static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3085{
3086 struct trace_array *tr = inode->i_private;
3087
3088 trace_array_put(tr);
3089 return 0;
3090}
3091
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003092static int tracing_single_release_tr(struct inode *inode, struct file *file)
3093{
3094 struct trace_array *tr = inode->i_private;
3095
3096 trace_array_put(tr);
3097
3098 return single_release(inode, file);
3099}
3100
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003101static int tracing_open(struct inode *inode, struct file *file)
3102{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003103 struct trace_array *tr = inode->i_private;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003104 struct trace_iterator *iter;
3105 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003106
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003107 if (trace_array_get(tr) < 0)
3108 return -ENODEV;
3109
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003110 /* If this file was open for write, then erase contents */
Oleg Nesterov6484c712013-07-23 17:26:10 +02003111 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3112 int cpu = tracing_get_cpu(inode);
3113
3114 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003115 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003116 else
Oleg Nesterov6484c712013-07-23 17:26:10 +02003117 tracing_reset(&tr->trace_buffer, cpu);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003118 }
3119
3120 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003121 iter = __tracing_open(inode, file, false);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003122 if (IS_ERR(iter))
3123 ret = PTR_ERR(iter);
3124 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
3125 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3126 }
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003127
3128 if (ret < 0)
3129 trace_array_put(tr);
3130
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003131 return ret;
3132}
3133
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003134/*
3135 * Some tracers are not suitable for instance buffers.
3136 * A tracer is always available for the global array (toplevel)
3137 * or if it explicitly states that it is.
3138 */
3139static bool
3140trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3141{
3142 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3143}
3144
3145/* Find the next tracer that this trace array may use */
3146static struct tracer *
3147get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3148{
3149 while (t && !trace_ok_for_array(t, tr))
3150 t = t->next;
3151
3152 return t;
3153}
3154
Ingo Molnare309b412008-05-12 21:20:51 +02003155static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003156t_next(struct seq_file *m, void *v, loff_t *pos)
3157{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003158 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003159 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003160
3161 (*pos)++;
3162
3163 if (t)
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003164 t = get_tracer_for_array(tr, t->next);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003165
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003166 return t;
3167}
3168
3169static void *t_start(struct seq_file *m, loff_t *pos)
3170{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003171 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003172 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003173 loff_t l = 0;
3174
3175 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003176
3177 t = get_tracer_for_array(tr, trace_types);
3178 for (; t && l < *pos; t = t_next(m, t, &l))
3179 ;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003180
3181 return t;
3182}
3183
3184static void t_stop(struct seq_file *m, void *p)
3185{
3186 mutex_unlock(&trace_types_lock);
3187}
3188
3189static int t_show(struct seq_file *m, void *v)
3190{
3191 struct tracer *t = v;
3192
3193 if (!t)
3194 return 0;
3195
3196 seq_printf(m, "%s", t->name);
3197 if (t->next)
3198 seq_putc(m, ' ');
3199 else
3200 seq_putc(m, '\n');
3201
3202 return 0;
3203}
3204
James Morris88e9d342009-09-22 16:43:43 -07003205static const struct seq_operations show_traces_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003206 .start = t_start,
3207 .next = t_next,
3208 .stop = t_stop,
3209 .show = t_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003210};
3211
3212static int show_traces_open(struct inode *inode, struct file *file)
3213{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003214 struct trace_array *tr = inode->i_private;
3215 struct seq_file *m;
3216 int ret;
3217
Steven Rostedt60a11772008-05-12 21:20:44 +02003218 if (tracing_disabled)
3219 return -ENODEV;
3220
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003221 ret = seq_open(file, &show_traces_seq_ops);
3222 if (ret)
3223 return ret;
3224
3225 m = file->private_data;
3226 m->private = tr;
3227
3228 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003229}
3230
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003231static ssize_t
3232tracing_write_stub(struct file *filp, const char __user *ubuf,
3233 size_t count, loff_t *ppos)
3234{
3235 return count;
3236}
3237
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003238loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08003239{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003240 int ret;
3241
Slava Pestov364829b2010-11-24 15:13:16 -08003242 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003243 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08003244 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003245 file->f_pos = ret = 0;
3246
3247 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08003248}
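/*
 * Read opens go through seq_file (see tracing_open()), so seeking can
 * be delegated to seq_lseek(); write-only opens have no seq_file state
 * to seek in, so the position is simply reset to zero.
 */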
3249
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003250static const struct file_operations tracing_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003251 .open = tracing_open,
3252 .read = seq_read,
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003253 .write = tracing_write_stub,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003254 .llseek = tracing_lseek,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003255 .release = tracing_release,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003256};
3257
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003258static const struct file_operations show_traces_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003259 .open = show_traces_open,
3260 .read = seq_read,
3261 .release = seq_release,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003262 .llseek = seq_lseek,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003263};
3264
Ingo Molnar36dfe922008-05-12 21:20:52 +02003265/*
Ingo Molnar36dfe922008-05-12 21:20:52 +02003266 * The tracer itself will not take this lock, but still we want
3267 * to provide a consistent cpumask to user-space:
3268 */
3269static DEFINE_MUTEX(tracing_cpumask_update_lock);
3270
3271/*
3272 * Temporary storage for the character representation of the
3273 * CPU bitmask (and one more byte for the newline):
3274 */
3275static char mask_str[NR_CPUS + 1];
3276
Ingo Molnarc7078de2008-05-12 21:20:52 +02003277static ssize_t
3278tracing_cpumask_read(struct file *filp, char __user *ubuf,
3279 size_t count, loff_t *ppos)
3280{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003281 struct trace_array *tr = file_inode(filp)->i_private;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003282 int len;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003283
3284 mutex_lock(&tracing_cpumask_update_lock);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003285
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003286 len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003287 if (count - len < 2) {
3288 count = -EINVAL;
3289 goto out_err;
3290 }
3291 len += sprintf(mask_str + len, "\n");
3292 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3293
3294out_err:
Ingo Molnarc7078de2008-05-12 21:20:52 +02003295 mutex_unlock(&tracing_cpumask_update_lock);
3296
3297 return count;
3298}
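/*
 * For illustration, on a hypothetical 8-CPU machine with every CPU
 * traced, reading tracing_cpumask returns the hex mask "ff" plus a
 * newline; with CPU 0 removed from the mask it would read "fe".
 */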
3299
3300static ssize_t
3301tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3302 size_t count, loff_t *ppos)
3303{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003304 struct trace_array *tr = file_inode(filp)->i_private;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303305 cpumask_var_t tracing_cpumask_new;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003306 int err, cpu;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303307
3308 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3309 return -ENOMEM;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003310
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303311 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003312 if (err)
3313 goto err_unlock;
3314
Li Zefan215368e2009-06-15 10:56:42 +08003315 mutex_lock(&tracing_cpumask_update_lock);
3316
Steven Rostedta5e25882008-12-02 15:34:05 -05003317 local_irq_disable();
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05003318 arch_spin_lock(&tr->max_lock);
Steven Rostedtab464282008-05-12 21:21:00 +02003319 for_each_tracing_cpu(cpu) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02003320 /*
3321 * Increase/decrease the disabled counter if we are
3322 * about to flip a bit in the cpumask:
3323 */
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003324 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303325 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003326 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3327 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003328 }
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003329 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303330 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003331 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3332 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003333 }
3334 }
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05003335 arch_spin_unlock(&tr->max_lock);
Steven Rostedta5e25882008-12-02 15:34:05 -05003336 local_irq_enable();
Ingo Molnar36dfe922008-05-12 21:20:52 +02003337
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003338 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003339
Ingo Molnarc7078de2008-05-12 21:20:52 +02003340 mutex_unlock(&tracing_cpumask_update_lock);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303341 free_cpumask_var(tracing_cpumask_new);
Ingo Molnarc7078de2008-05-12 21:20:52 +02003342
Ingo Molnarc7078de2008-05-12 21:20:52 +02003343 return count;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003344
3345err_unlock:
Li Zefan215368e2009-06-15 10:56:42 +08003346 free_cpumask_var(tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003347
3348 return err;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003349}
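/*
 * Example (illustrative): "echo 3 > tracing_cpumask" limits tracing to
 * CPUs 0 and 1. The loop above pairs each bit flip with a disabled
 * counter update and a ring_buffer_record_*_cpu() call, so events from
 * a newly-masked CPU stop exactly when its bit is cleared.
 */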
3350
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003351static const struct file_operations tracing_cpumask_fops = {
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003352 .open = tracing_open_generic_tr,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003353 .read = tracing_cpumask_read,
3354 .write = tracing_cpumask_write,
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003355 .release = tracing_release_generic_tr,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003356 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003357};
3358
Li Zefanfdb372e2009-12-08 11:15:59 +08003359static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003360{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003361 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003362 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003363 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003364 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003365
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003366 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003367 tracer_flags = tr->current_trace->flags->val;
3368 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003369
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003370 for (i = 0; trace_options[i]; i++) {
3371 if (trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08003372 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003373 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003374 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003375 }
3376
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003377 for (i = 0; trace_opts[i].name; i++) {
3378 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08003379 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003380 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003381 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003382 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003383 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003384
Li Zefanfdb372e2009-12-08 11:15:59 +08003385 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003386}
3387
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003388static int __set_tracer_option(struct trace_array *tr,
Li Zefan8d18eaa2009-12-08 11:17:06 +08003389 struct tracer_flags *tracer_flags,
3390 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003391{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003392 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003393 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003394
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003395 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003396 if (ret)
3397 return ret;
3398
3399 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08003400 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003401 else
Zhaolei77708412009-08-07 18:53:21 +08003402 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003403 return 0;
3404}
3405
Li Zefan8d18eaa2009-12-08 11:17:06 +08003406/* Try to assign a tracer specific option */
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003407static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
Li Zefan8d18eaa2009-12-08 11:17:06 +08003408{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003409 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003410 struct tracer_flags *tracer_flags = trace->flags;
3411 struct tracer_opt *opts = NULL;
3412 int i;
3413
3414 for (i = 0; tracer_flags->opts[i].name; i++) {
3415 opts = &tracer_flags->opts[i];
3416
3417 if (strcmp(cmp, opts->name) == 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003418 return __set_tracer_option(tr, trace->flags, opts, neg);
Li Zefan8d18eaa2009-12-08 11:17:06 +08003419 }
3420
3421 return -EINVAL;
3422}
3423
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003424/* Some tracers require overwrite to stay enabled */
3425int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3426{
3427 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3428 return -1;
3429
3430 return 0;
3431}
3432
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003433int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003434{
3435 /* do nothing if flag is already set */
3436 if (!!(trace_flags & mask) == !!enabled)
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003437 return 0;
3438
3439 /* Give the tracer a chance to approve the change */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003440 if (tr->current_trace->flag_changed)
Steven Rostedt (Red Hat)bf6065b2014-01-10 17:51:01 -05003441 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003442 return -EINVAL;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003443
3444 if (enabled)
3445 trace_flags |= mask;
3446 else
3447 trace_flags &= ~mask;
Li Zefane870e9a2010-07-02 11:07:32 +08003448
3449 if (mask == TRACE_ITER_RECORD_CMD)
3450 trace_event_enable_cmd_record(enabled);
David Sharp750912f2010-12-08 13:46:47 -08003451
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003452 if (mask == TRACE_ITER_OVERWRITE) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003453 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003454#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003455 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003456#endif
3457 }
Steven Rostedt81698832012-10-11 10:15:05 -04003458
3459 if (mask == TRACE_ITER_PRINTK)
3460 trace_printk_start_stop_comm(enabled);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003461
3462 return 0;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003463}
3464
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003465static int trace_set_options(struct trace_array *tr, char *option)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003466{
Li Zefan8d18eaa2009-12-08 11:17:06 +08003467 char *cmp;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003468 int neg = 0;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003469 int ret = -ENODEV;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003470 int i;
3471
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003472 cmp = strstrip(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003473
Li Zefan8d18eaa2009-12-08 11:17:06 +08003474 if (strncmp(cmp, "no", 2) == 0) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003475 neg = 1;
3476 cmp += 2;
3477 }
3478
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003479 mutex_lock(&trace_types_lock);
3480
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003481 for (i = 0; trace_options[i]; i++) {
Li Zefan8d18eaa2009-12-08 11:17:06 +08003482 if (strcmp(cmp, trace_options[i]) == 0) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003483 ret = set_tracer_flag(tr, 1 << i, !neg);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003484 break;
3485 }
3486 }
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003487
3488 /* If no option could be set, test the specific tracer options */
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003489 if (!trace_options[i])
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003490 ret = set_tracer_option(tr, cmp, neg);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003491
3492 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003493
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003494 return ret;
3495}
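/*
 * Illustrative inputs: writing "print-parent" sets that core trace
 * option, "noprint-parent" clears it, and a name that matches no core
 * option (a tracer-specific one such as the function tracer's
 * "func_stack_trace") falls through to set_tracer_option().
 */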
3496
3497static ssize_t
3498tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3499 size_t cnt, loff_t *ppos)
3500{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003501 struct seq_file *m = filp->private_data;
3502 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003503 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003504 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003505
3506 if (cnt >= sizeof(buf))
3507 return -EINVAL;
3508
3509 if (copy_from_user(&buf, ubuf, cnt))
3510 return -EFAULT;
3511
Steven Rostedta8dd2172013-01-09 20:54:17 -05003512 buf[cnt] = 0;
3513
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003514 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003515 if (ret < 0)
3516 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003517
Jiri Olsacf8517c2009-10-23 19:36:16 -04003518 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003519
3520 return cnt;
3521}
3522
Li Zefanfdb372e2009-12-08 11:15:59 +08003523static int tracing_trace_options_open(struct inode *inode, struct file *file)
3524{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003525 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003526 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003527
Li Zefanfdb372e2009-12-08 11:15:59 +08003528 if (tracing_disabled)
3529 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003530
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003531 if (trace_array_get(tr) < 0)
3532 return -ENODEV;
3533
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003534 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3535 if (ret < 0)
3536 trace_array_put(tr);
3537
3538 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08003539}
3540
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003541static const struct file_operations tracing_iter_fops = {
Li Zefanfdb372e2009-12-08 11:15:59 +08003542 .open = tracing_trace_options_open,
3543 .read = seq_read,
3544 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003545 .release = tracing_single_release_tr,
Steven Rostedtee6bce52008-11-12 17:52:37 -05003546 .write = tracing_trace_options_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003547};
3548
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003549static const char readme_msg[] =
3550 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003551 "# echo 0 > tracing_on : quick way to disable tracing\n"
3552 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3553 " Important files:\n"
3554 " trace\t\t\t- The static contents of the buffer\n"
3555 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3556 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3557 " current_tracer\t- function and latency tracers\n"
3558 " available_tracers\t- list of configured tracers for current_tracer\n"
3559 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3560 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3561 " trace_clock\t\t-change the clock used to order events\n"
3562 " local: Per cpu clock but may not be synced across CPUs\n"
3563 " global: Synced across CPUs but slows tracing down.\n"
3564 " counter: Not a clock, but just an increment\n"
3565 " uptime: Jiffy counter from time of boot\n"
3566 " perf: Same clock that perf events use\n"
3567#ifdef CONFIG_X86_64
3568 " x86-tsc: TSC cycle counter\n"
3569#endif
3570 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3571 " tracing_cpumask\t- Limit which CPUs to trace\n"
3572 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3573 "\t\t\t Remove sub-buffer with rmdir\n"
3574 " trace_options\t\t- Set format or modify how tracing happens\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003575 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
3576 "\t\t\t option name\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003577#ifdef CONFIG_DYNAMIC_FTRACE
3578 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003579 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3580 "\t\t\t functions\n"
3581 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3582 "\t modules: Can select a group via module\n"
3583 "\t Format: :mod:<module-name>\n"
3584 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3585 "\t triggers: a command to perform when function is hit\n"
3586 "\t Format: <function>:<trigger>[:count]\n"
3587 "\t trigger: traceon, traceoff\n"
3588 "\t\t enable_event:<system>:<event>\n"
3589 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003590#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003591 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003592#endif
3593#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003594 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003595#endif
Steven Rostedt (Red Hat)17a280e2014-04-10 22:43:37 -04003596 "\t\t dump\n"
3597 "\t\t cpudump\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003598 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3599 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3600 "\t The first one will disable tracing every time do_fault is hit\n"
3601 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3602 "\t The first time do trap is hit and it disables tracing, the\n"
3603 "\t counter will decrement to 2. If tracing is already disabled,\n"
3604 "\t the counter will not decrement. It only decrements when the\n"
3605 "\t trigger did work\n"
3606 "\t To remove trigger without count:\n"
3607 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3608 "\t To remove trigger with a count:\n"
3609 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003610 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003611 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3612 "\t modules: Can select a group via module command :mod:\n"
3613 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003614#endif /* CONFIG_DYNAMIC_FTRACE */
3615#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003616 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3617 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003618#endif
3619#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3620 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3621 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3622#endif
3623#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003624 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3625 "\t\t\t snapshot buffer. Read the contents for more\n"
3626 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003627#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003628#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003629 " stack_trace\t\t- Shows the max stack trace when active\n"
3630 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003631 "\t\t\t Write into this file to reset the max size (trigger a\n"
3632 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003633#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003634 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3635 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003636#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003637#endif /* CONFIG_STACK_TRACER */
Tom Zanussi26f25562014-01-17 15:11:44 -06003638 " events/\t\t- Directory containing all trace event subsystems:\n"
3639 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3640 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003641 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3642 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003643 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003644 " events/<system>/<event>/\t- Directory containing control files for\n"
3645 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003646 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3647 " filter\t\t- If set, only events passing filter are traced\n"
3648 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003649 "\t Format: <trigger>[:count][if <filter>]\n"
3650 "\t trigger: traceon, traceoff\n"
3651 "\t enable_event:<system>:<event>\n"
3652 "\t disable_event:<system>:<event>\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003653#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003654 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003655#endif
3656#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003657 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003658#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003659 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3660 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3661 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3662 "\t events/block/block_unplug/trigger\n"
3663 "\t The first disables tracing every time block_unplug is hit.\n"
3664 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3665 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3666 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3667 "\t Like function triggers, the counter is only decremented if it\n"
3668 "\t enabled or disabled tracing.\n"
3669 "\t To remove a trigger without a count:\n"
3670 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3671 "\t To remove a trigger with a count:\n"
3672 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3673 "\t Filters can be ignored when removing a trigger.\n"
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003674;
3675
3676static ssize_t
3677tracing_readme_read(struct file *filp, char __user *ubuf,
3678 size_t cnt, loff_t *ppos)
3679{
3680 return simple_read_from_buffer(ubuf, cnt, ppos,
3681 readme_msg, strlen(readme_msg));
3682}
3683
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003684static const struct file_operations tracing_readme_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003685 .open = tracing_open_generic,
3686 .read = tracing_readme_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003687 .llseek = generic_file_llseek,
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003688};
3689
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003690static ssize_t
Avadh Patel69abe6a2009-04-10 16:04:48 -04003691tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
3692 size_t cnt, loff_t *ppos)
3693{
3694 char *buf_comm;
3695 char *file_buf;
3696 char *buf;
3697 int len = 0;
3698 int pid;
3699 int i;
3700
3701 file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
3702 if (!file_buf)
3703 return -ENOMEM;
3704
3705 buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
3706 if (!buf_comm) {
3707 kfree(file_buf);
3708 return -ENOMEM;
3709 }
3710
3711 buf = file_buf;
3712
3713 for (i = 0; i < SAVED_CMDLINES; i++) {
3714 int r;
3715
3716 pid = map_cmdline_to_pid[i];
3717 if (pid == -1 || pid == NO_CMDLINE_MAP)
3718 continue;
3719
3720 trace_find_cmdline(pid, buf_comm);
3721 r = sprintf(buf, "%d %s\n", pid, buf_comm);
3722 buf += r;
3723 len += r;
3724 }
3725
3726 len = simple_read_from_buffer(ubuf, cnt, ppos,
3727 file_buf, len);
3728
3729 kfree(file_buf);
3730 kfree(buf_comm);
3731
3732 return len;
3733}
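
/*
 * Output format sketch (the pid/comm values below are made up, purely
 * illustrative): each saved mapping becomes one "pid comm" line:
 *
 *	# cat saved_cmdlines
 *	1 systemd
 *	1234 bash
 */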
3734
3735static const struct file_operations tracing_saved_cmdlines_fops = {
3736 .open = tracing_open_generic,
3737 .read = tracing_saved_cmdlines_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003738 .llseek = generic_file_llseek,
Avadh Patel69abe6a2009-04-10 16:04:48 -04003739};
3740
3741static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003742tracing_set_trace_read(struct file *filp, char __user *ubuf,
3743 size_t cnt, loff_t *ppos)
3744{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003745 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08003746 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003747 int r;
3748
3749 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003750 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003751 mutex_unlock(&trace_types_lock);
3752
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003753 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003754}
3755
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02003756int tracer_init(struct tracer *t, struct trace_array *tr)
3757{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003758 tracing_reset_online_cpus(&tr->trace_buffer);
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02003759 return t->init(tr);
3760}
3761
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003762static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003763{
3764 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05003765
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003766 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003767 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003768}
3769
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003770#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookad60da502012-10-17 11:56:16 +09003771/* resize @trace_buf's buffer to the size of @size_buf's entries */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003772static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
3773 struct trace_buffer *size_buf, int cpu_id)
Hiraku Toyookad60da502012-10-17 11:56:16 +09003774{
3775 int cpu, ret = 0;
3776
3777 if (cpu_id == RING_BUFFER_ALL_CPUS) {
3778 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003779 ret = ring_buffer_resize(trace_buf->buffer,
3780 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
Hiraku Toyookad60da502012-10-17 11:56:16 +09003781 if (ret < 0)
3782 break;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003783 per_cpu_ptr(trace_buf->data, cpu)->entries =
3784 per_cpu_ptr(size_buf->data, cpu)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09003785 }
3786 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003787 ret = ring_buffer_resize(trace_buf->buffer,
3788 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
Hiraku Toyookad60da502012-10-17 11:56:16 +09003789 if (ret == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003790 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
3791 per_cpu_ptr(size_buf->data, cpu_id)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09003792 }
3793
3794 return ret;
3795}
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003796#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09003797
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003798static int __tracing_resize_ring_buffer(struct trace_array *tr,
3799 unsigned long size, int cpu)
Steven Rostedt73c51622009-03-11 13:42:01 -04003800{
3801 int ret;
3802
3803 /*
3804	 * If the kernel or the user changes the size of the ring buffer,
Steven Rostedta123c522009-03-12 11:21:08 -04003805	 * we use the size that was given and can forget about
3806	 * expanding it later.
Steven Rostedt73c51622009-03-11 13:42:01 -04003807 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05003808 ring_buffer_expanded = true;
Steven Rostedt73c51622009-03-11 13:42:01 -04003809
Steven Rostedtb382ede62012-10-10 21:44:34 -04003810 /* May be called before buffers are initialized */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003811 if (!tr->trace_buffer.buffer)
Steven Rostedtb382ede62012-10-10 21:44:34 -04003812 return 0;
3813
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003814 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003815 if (ret < 0)
3816 return ret;
3817
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003818#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003819 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
3820 !tr->current_trace->use_max_tr)
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003821 goto out;
3822
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003823 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003824 if (ret < 0) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003825 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
3826 &tr->trace_buffer, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003827 if (r < 0) {
Steven Rostedta123c522009-03-12 11:21:08 -04003828 /*
3829	 * AARGH! We are left with a
3830	 * differently sized max buffer!!!!
3831	 * The max buffer is our "snapshot" buffer.
3832	 * When a tracer needs a snapshot (one of the
3833	 * latency tracers), it swaps the max buffer
3834	 * with the saved snapshot. We succeeded in
3835	 * updating the size of the main buffer, but failed to
3836	 * update the size of the max buffer. And when we tried
3837	 * to reset the main buffer to the original size, we
3838	 * failed there too. This is very unlikely to
3839	 * happen, but if it does, warn and kill all
3840	 * tracing.
3841 */
Steven Rostedt73c51622009-03-11 13:42:01 -04003842 WARN_ON(1);
3843 tracing_disabled = 1;
3844 }
3845 return ret;
3846 }
3847
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003848 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003849 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003850 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003851 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003852
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003853 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003854#endif /* CONFIG_TRACER_MAX_TRACE */
3855
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003856 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003857 set_buffer_entries(&tr->trace_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003858 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003859 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04003860
3861 return ret;
3862}
3863
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003864static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
3865 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003866{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07003867 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003868
3869 mutex_lock(&trace_types_lock);
3870
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003871 if (cpu_id != RING_BUFFER_ALL_CPUS) {
3872	 /* make sure this cpu is enabled in the mask */
3873 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
3874 ret = -EINVAL;
3875 goto out;
3876 }
3877 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003878
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003879 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003880 if (ret < 0)
3881 ret = -ENOMEM;
3882
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003883out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003884 mutex_unlock(&trace_types_lock);
3885
3886 return ret;
3887}
3888
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003889
Steven Rostedt1852fcc2009-03-11 14:33:00 -04003890/**
3891 * tracing_update_buffers - used by tracing facility to expand ring buffers
3892 *
3893 * To save memory when tracing is never used on a system that has it
3894 * configured in, the ring buffers are set to a minimum size. Once
3895 * a user starts to use the tracing facility, they need to grow
3896 * to their default size.
3897 *
3898 * This function is to be called when a tracer is about to be used.
3899 */
3900int tracing_update_buffers(void)
3901{
3902 int ret = 0;
3903
Steven Rostedt1027fcb2009-03-12 11:33:20 -04003904 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04003905 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003906 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003907 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04003908 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04003909
3910 return ret;
3911}
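
/*
 * A hypothetical caller, sketching the intended pattern (real callers
 * may differ): expand the buffers first and bail out on failure, and
 * only then enable a tracer or an event.
 *
 *	ret = tracing_update_buffers();
 *	if (ret < 0)
 *		return ret;
 */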
3912
Steven Rostedt577b7852009-02-26 23:43:05 -05003913struct trace_option_dentry;
3914
3915static struct trace_option_dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003916create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05003917
3918static void
3919destroy_trace_option_files(struct trace_option_dentry *topts);
3920
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05003921/*
3922 * Used to clear out the tracer before deletion of an instance.
3923 * Must have trace_types_lock held.
3924 */
3925static void tracing_set_nop(struct trace_array *tr)
3926{
3927 if (tr->current_trace == &nop_trace)
3928 return;
3929
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05003930 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05003931
3932 if (tr->current_trace->reset)
3933 tr->current_trace->reset(tr);
3934
3935 tr->current_trace = &nop_trace;
3936}
3937
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003938static int tracing_set_tracer(struct trace_array *tr, const char *buf)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003939{
Steven Rostedt577b7852009-02-26 23:43:05 -05003940 static struct trace_option_dentry *topts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003941 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003942#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05003943 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003944#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01003945 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003946
Steven Rostedt1027fcb2009-03-12 11:33:20 -04003947 mutex_lock(&trace_types_lock);
3948
Steven Rostedt73c51622009-03-11 13:42:01 -04003949 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003950 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003951 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04003952 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01003953 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04003954 ret = 0;
3955 }
3956
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003957 for (t = trace_types; t; t = t->next) {
3958 if (strcmp(t->name, buf) == 0)
3959 break;
3960 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02003961 if (!t) {
3962 ret = -EINVAL;
3963 goto out;
3964 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003965 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003966 goto out;
3967
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003968 /* Some tracers are only allowed for the top level buffer */
3969 if (!trace_ok_for_array(t, tr)) {
3970 ret = -EINVAL;
3971 goto out;
3972 }
3973
Steven Rostedt9f029e82008-11-12 15:24:24 -05003974 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003975
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05003976 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003977
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003978 if (tr->current_trace->reset)
3979 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05003980
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003981 /* Current trace needs to be nop_trace before synchronize_sched */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003982 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05003983
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05003984#ifdef CONFIG_TRACER_MAX_TRACE
3985 had_max_tr = tr->allocated_snapshot;
Steven Rostedt34600f02013-01-22 13:35:11 -05003986
3987 if (had_max_tr && !t->use_max_tr) {
3988 /*
3989 * We need to make sure that the update_max_tr sees that
3990 * current_trace changed to nop_trace to keep it from
3991 * swapping the buffers after we resize it.
3992	 * update_max_tr() is called with interrupts disabled,
3993	 * so a synchronize_sched() is sufficient.
3994 */
3995 synchronize_sched();
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04003996 free_snapshot(tr);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003997 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003998#endif
Steven Rostedt (Red Hat)f1b21c92014-01-14 12:33:33 -05003999 /* Currently, only the top instance has options */
4000 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
4001 destroy_trace_option_files(topts);
4002 topts = create_trace_option_files(tr, t);
4003 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004004
4005#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05004006 if (t->use_max_tr && !had_max_tr) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004007 ret = alloc_snapshot(tr);
Hiraku Toyookad60da502012-10-17 11:56:16 +09004008 if (ret < 0)
4009 goto out;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004010 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004011#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05004012
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004013 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004014 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004015 if (ret)
4016 goto out;
4017 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004018
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004019 tr->current_trace = t;
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004020 tr->current_trace->enabled++;
Steven Rostedt9f029e82008-11-12 15:24:24 -05004021 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004022 out:
4023 mutex_unlock(&trace_types_lock);
4024
Peter Zijlstrad9e54072008-11-01 19:57:37 +01004025 return ret;
4026}
4027
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004028static ssize_t
4029tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4030 size_t cnt, loff_t *ppos)
4031{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004032 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08004033 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004034 int i;
4035 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004036 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004037
Steven Rostedt60063a62008-10-28 10:44:24 -04004038 ret = cnt;
4039
Li Zefanee6c2c12009-09-18 14:06:47 +08004040 if (cnt > MAX_TRACER_SIZE)
4041 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004042
4043 if (copy_from_user(&buf, ubuf, cnt))
4044 return -EFAULT;
4045
4046 buf[cnt] = 0;
4047
4048	 /* strip trailing whitespace. */
4049 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4050 buf[i] = 0;
4051
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004052 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004053 if (err)
4054 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004055
Jiri Olsacf8517c2009-10-23 19:36:16 -04004056 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004057
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004058 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004059}
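
/*
 * Illustrative usage, assuming the usual debugfs mount point (the
 * write path above strips the newline that echo appends):
 *
 *	# echo function_graph > /sys/kernel/debug/tracing/current_tracer
 *	# cat /sys/kernel/debug/tracing/current_tracer
 *	function_graph
 */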
4060
4061static ssize_t
4062tracing_max_lat_read(struct file *filp, char __user *ubuf,
4063 size_t cnt, loff_t *ppos)
4064{
4065 unsigned long *ptr = filp->private_data;
4066 char buf[64];
4067 int r;
4068
Steven Rostedtcffae432008-05-12 21:21:00 +02004069 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004070 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02004071 if (r > sizeof(buf))
4072 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004073 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004074}
4075
4076static ssize_t
4077tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4078 size_t cnt, loff_t *ppos)
4079{
Hannes Eder5e398412009-02-10 19:44:34 +01004080 unsigned long *ptr = filp->private_data;
Hannes Eder5e398412009-02-10 19:44:34 +01004081 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004082 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004083
Peter Huewe22fe9b52011-06-07 21:58:27 +02004084 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4085 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004086 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004087
4088 *ptr = val * 1000;
4089
4090 return cnt;
4091}
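
/*
 * Illustrative usage, assuming the usual debugfs mount point: the
 * file is read and written in microseconds, while the stored value
 * is in nanoseconds, hence the "* 1000" above.
 *
 *	# echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 */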
4092
Steven Rostedtb3806b42008-05-12 21:20:46 +02004093static int tracing_open_pipe(struct inode *inode, struct file *filp)
4094{
Oleg Nesterov15544202013-07-23 17:25:57 +02004095 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004096 struct trace_iterator *iter;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004097 int ret = 0;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004098
4099 if (tracing_disabled)
4100 return -ENODEV;
4101
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004102 if (trace_array_get(tr) < 0)
4103 return -ENODEV;
4104
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004105 mutex_lock(&trace_types_lock);
4106
Steven Rostedtb3806b42008-05-12 21:20:46 +02004107 /* create a buffer to store the information to pass to userspace */
4108 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004109 if (!iter) {
4110 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004111 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004112 goto out;
4113 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004114
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004115 /*
4116 * We make a copy of the current tracer to avoid concurrent
4117	 * changes to it while we are reading.
4118 */
4119 iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
4120 if (!iter->trace) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004121 ret = -ENOMEM;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004122 goto fail;
4123 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004124 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004125
4126 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4127 ret = -ENOMEM;
4128 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10304129 }
4130
Steven Rostedta3097202008-11-07 22:36:02 -05004131 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10304132 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05004133
Steven Rostedt112f38a72009-06-01 15:16:05 -04004134 if (trace_flags & TRACE_ITER_LATENCY_FMT)
4135 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4136
David Sharp8be07092012-11-13 12:18:22 -08004137 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09004138 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08004139 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4140
Oleg Nesterov15544202013-07-23 17:25:57 +02004141 iter->tr = tr;
4142 iter->trace_buffer = &tr->trace_buffer;
4143 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004144 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004145 filp->private_data = iter;
4146
Steven Rostedt107bad82008-05-12 21:21:01 +02004147 if (iter->trace->pipe_open)
4148 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02004149
Arnd Bergmannb4447862010-07-07 23:40:11 +02004150 nonseekable_open(inode, filp);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004151out:
4152 mutex_unlock(&trace_types_lock);
4153 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004154
4155fail:
4156 kfree(iter->trace);
4157 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004158 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004159 mutex_unlock(&trace_types_lock);
4160 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004161}
4162
4163static int tracing_release_pipe(struct inode *inode, struct file *file)
4164{
4165 struct trace_iterator *iter = file->private_data;
Oleg Nesterov15544202013-07-23 17:25:57 +02004166 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004167
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004168 mutex_lock(&trace_types_lock);
4169
Steven Rostedt29bf4a52009-12-09 12:37:43 -05004170 if (iter->trace->pipe_close)
Steven Rostedtc521efd2009-12-07 09:06:24 -05004171 iter->trace->pipe_close(iter);
4172
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004173 mutex_unlock(&trace_types_lock);
4174
Rusty Russell44623442009-01-01 10:12:23 +10304175 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004176 mutex_destroy(&iter->mutex);
4177 kfree(iter->trace);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004178 kfree(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004179
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004180 trace_array_put(tr);
4181
Steven Rostedtb3806b42008-05-12 21:20:46 +02004182 return 0;
4183}
4184
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004185static unsigned int
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004186trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004187{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004188	 /* Iterators are static; they should be filled or empty */
4189 if (trace_buffer_iter(iter, iter->cpu_file))
4190 return POLLIN | POLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004191
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004192 if (trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004193 /*
4194 * Always select as readable when in blocking mode
4195 */
4196 return POLLIN | POLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004197 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004198 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004199 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004200}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004201
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004202static unsigned int
4203tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4204{
4205 struct trace_iterator *iter = filp->private_data;
4206
4207 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004208}
4209
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01004210/*
4211 * This is a makeshift waitqueue.
4212 * A tracer might use this callback in some rare cases:
4213 *
4214 * 1) the current tracer might hold the runqueue lock when it wakes up
4215 * a reader, hence a deadlock (sched, function, and function graph tracers)
4216 * 2) the function tracers trace all functions, and we don't want
4217 * the overhead of calling wake_up and friends
4218 * (and of tracing them too)
4219 *
4220 * Anyway, this is a really primitive wakeup.
4221 */
4222void poll_wait_pipe(struct trace_iterator *iter)
4223{
4224 set_current_state(TASK_INTERRUPTIBLE);
4225 /* sleep for 100 msecs, and try again. */
4226 schedule_timeout(HZ / 10);
4227}
4228
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004229/* Must be called with trace_types_lock mutex held. */
4230static int tracing_wait_pipe(struct file *filp)
4231{
4232 struct trace_iterator *iter = filp->private_data;
4233
4234 while (trace_empty(iter)) {
4235
4236 if ((filp->f_flags & O_NONBLOCK)) {
4237 return -EAGAIN;
4238 }
4239
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004240 /*
Liu Bo250bfd32013-01-14 10:54:11 +08004241 * We block until we read something and tracing is disabled.
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004242 * We still block if tracing is disabled, but we have never
4243 * read anything. This allows a user to cat this file, and
4244 * then enable tracing. But after we have read something,
4245 * we give an EOF when tracing is again disabled.
4246 *
4247 * iter->pos will be 0 if we haven't read anything.
4248 */
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04004249 if (!tracing_is_on() && iter->pos)
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004250 break;
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04004251
4252 mutex_unlock(&iter->mutex);
4253
4254 iter->trace->wait_pipe(iter);
4255
4256 mutex_lock(&iter->mutex);
4257
4258 if (signal_pending(current))
4259 return -EINTR;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004260 }
4261
4262 return 1;
4263}
4264
Steven Rostedtb3806b42008-05-12 21:20:46 +02004265/*
4266 * Consumer reader.
4267 */
4268static ssize_t
4269tracing_read_pipe(struct file *filp, char __user *ubuf,
4270 size_t cnt, loff_t *ppos)
4271{
4272 struct trace_iterator *iter = filp->private_data;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004273 struct trace_array *tr = iter->tr;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004274 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004275
4276 /* return any leftover data */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004277 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4278 if (sret != -EBUSY)
4279 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004280
Steven Rostedtf9520752009-03-02 14:04:40 -05004281 trace_seq_init(&iter->seq);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004282
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004283 /* copy the tracer to avoid using a global lock all around */
Steven Rostedt107bad82008-05-12 21:21:01 +02004284 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004285 if (unlikely(iter->trace->name != tr->current_trace->name))
4286 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004287 mutex_unlock(&trace_types_lock);
4288
4289 /*
4290 * Avoid more than one consumer on a single file descriptor
4291 * This is just a matter of traces coherency, the ring buffer itself
4292 * is protected.
4293 */
4294 mutex_lock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02004295 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004296 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4297 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02004298 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02004299 }
4300
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004301waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004302 sret = tracing_wait_pipe(filp);
4303 if (sret <= 0)
4304 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004305
4306 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004307 if (trace_empty(iter)) {
4308 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02004309 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004310 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004311
4312 if (cnt >= PAGE_SIZE)
4313 cnt = PAGE_SIZE - 1;
4314
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004315 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004316 memset(&iter->seq, 0,
4317 sizeof(struct trace_iterator) -
4318 offsetof(struct trace_iterator, seq));
Andrew Vagined5467d2013-08-02 21:16:43 +04004319 cpumask_clear(iter->started);
Steven Rostedt4823ed72008-05-12 21:21:01 +02004320 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004321
Lai Jiangshan4f535962009-05-18 19:35:34 +08004322 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004323 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05004324 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004325 enum print_line_t ret;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004326 int len = iter->seq.len;
4327
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004328 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004329 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02004330 /* don't print partial lines */
4331 iter->seq.len = len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004332 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004333 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01004334 if (ret != TRACE_TYPE_NO_CONSUME)
4335 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004336
4337 if (iter->seq.len >= cnt)
4338 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01004339
4340 /*
4341 * Setting the full flag means we reached the trace_seq buffer
4342		 * size and we should have left via the partial output condition above.
4343 * One of the trace_seq_* functions is not used properly.
4344 */
4345 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4346 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004347 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004348 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004349 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02004350
Steven Rostedtb3806b42008-05-12 21:20:46 +02004351 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004352 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4353 if (iter->seq.readpos >= iter->seq.len)
Steven Rostedtf9520752009-03-02 14:04:40 -05004354 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004355
4356 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004357	 * If there was nothing to send to the user, in spite of consuming trace
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004358 * entries, go back to wait for more entries.
4359 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004360 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004361 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004362
Steven Rostedt107bad82008-05-12 21:21:01 +02004363out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004364 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02004365
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004366 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004367}
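
/*
 * Illustrative usage, assuming the usual debugfs mount point: unlike
 * the "trace" file, reads here consume the entries, and a blocking
 * read waits for new data.
 *
 *	# cat /sys/kernel/debug/tracing/trace_pipe
 */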
4368
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004369static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4370 unsigned int idx)
4371{
4372 __free_page(spd->pages[idx]);
4373}
4374
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08004375static const struct pipe_buf_operations tracing_pipe_buf_ops = {
Steven Rostedt34cd4992009-02-09 12:06:29 -05004376 .can_merge = 0,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004377 .confirm = generic_pipe_buf_confirm,
Al Viro92fdd982014-01-17 07:53:39 -05004378 .release = generic_pipe_buf_release,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004379 .steal = generic_pipe_buf_steal,
4380 .get = generic_pipe_buf_get,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004381};
4382
Steven Rostedt34cd4992009-02-09 12:06:29 -05004383static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01004384tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004385{
4386 size_t count;
4387 int ret;
4388
4389 /* Seq buffer is page-sized, exactly what we need. */
4390 for (;;) {
4391 count = iter->seq.len;
4392 ret = print_trace_line(iter);
4393 count = iter->seq.len - count;
4394 if (rem < count) {
4395 rem = 0;
4396 iter->seq.len -= count;
4397 break;
4398 }
4399 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4400 iter->seq.len -= count;
4401 break;
4402 }
4403
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08004404 if (ret != TRACE_TYPE_NO_CONSUME)
4405 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05004406 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05004407 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05004408 rem = 0;
4409 iter->ent = NULL;
4410 break;
4411 }
4412 }
4413
4414 return rem;
4415}
4416
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004417static ssize_t tracing_splice_read_pipe(struct file *filp,
4418 loff_t *ppos,
4419 struct pipe_inode_info *pipe,
4420 size_t len,
4421 unsigned int flags)
4422{
Jens Axboe35f3d142010-05-20 10:43:18 +02004423 struct page *pages_def[PIPE_DEF_BUFFERS];
4424 struct partial_page partial_def[PIPE_DEF_BUFFERS];
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004425 struct trace_iterator *iter = filp->private_data;
4426 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02004427 .pages = pages_def,
4428 .partial = partial_def,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004429 .nr_pages = 0, /* This gets updated below. */
Eric Dumazet047fe362012-06-12 15:24:40 +02004430 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004431 .flags = flags,
4432 .ops = &tracing_pipe_buf_ops,
4433 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004434 };
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004435 struct trace_array *tr = iter->tr;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004436 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004437 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004438 unsigned int i;
4439
Jens Axboe35f3d142010-05-20 10:43:18 +02004440 if (splice_grow_spd(pipe, &spd))
4441 return -ENOMEM;
4442
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004443 /* copy the tracer to avoid using a global lock all around */
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004444 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004445 if (unlikely(iter->trace->name != tr->current_trace->name))
4446 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004447 mutex_unlock(&trace_types_lock);
4448
4449 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004450
4451 if (iter->trace->splice_read) {
4452 ret = iter->trace->splice_read(iter, filp,
4453 ppos, pipe, len, flags);
4454 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004455 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004456 }
4457
4458 ret = tracing_wait_pipe(filp);
4459 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004460 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004461
Jason Wessel955b61e2010-08-05 09:22:23 -05004462 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004463 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004464 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004465 }
4466
Lai Jiangshan4f535962009-05-18 19:35:34 +08004467 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004468 trace_access_lock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004469
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004470 /* Fill as many pages as possible. */
Al Viroa786c062014-04-11 12:01:03 -04004471 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
Jens Axboe35f3d142010-05-20 10:43:18 +02004472 spd.pages[i] = alloc_page(GFP_KERNEL);
4473 if (!spd.pages[i])
Steven Rostedt34cd4992009-02-09 12:06:29 -05004474 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004475
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01004476 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004477
4478 /* Copy the data into the page, so we can start over. */
4479 ret = trace_seq_to_buffer(&iter->seq,
Jens Axboe35f3d142010-05-20 10:43:18 +02004480 page_address(spd.pages[i]),
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004481 iter->seq.len);
4482 if (ret < 0) {
Jens Axboe35f3d142010-05-20 10:43:18 +02004483 __free_page(spd.pages[i]);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004484 break;
4485 }
Jens Axboe35f3d142010-05-20 10:43:18 +02004486 spd.partial[i].offset = 0;
4487 spd.partial[i].len = iter->seq.len;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004488
Steven Rostedtf9520752009-03-02 14:04:40 -05004489 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004490 }
4491
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004492 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004493 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004494 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004495
4496 spd.nr_pages = i;
4497
Jens Axboe35f3d142010-05-20 10:43:18 +02004498 ret = splice_to_pipe(pipe, &spd);
4499out:
Eric Dumazet047fe362012-06-12 15:24:40 +02004500 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02004501 return ret;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004502
Steven Rostedt34cd4992009-02-09 12:06:29 -05004503out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004504 mutex_unlock(&iter->mutex);
Jens Axboe35f3d142010-05-20 10:43:18 +02004505 goto out;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004506}
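
/*
 * A minimal user-space sketch of this path (hypothetical fds, error
 * handling omitted): splice() moves trace data into a pipe without
 * going through an intermediate read() buffer.
 *
 *	fd = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);
 *	splice(fd, NULL, pipefd[1], NULL, 4096, 0);
 */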
4507
Steven Rostedta98a3c32008-05-12 21:20:59 +02004508static ssize_t
4509tracing_entries_read(struct file *filp, char __user *ubuf,
4510 size_t cnt, loff_t *ppos)
4511{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004512 struct inode *inode = file_inode(filp);
4513 struct trace_array *tr = inode->i_private;
4514 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004515 char buf[64];
4516 int r = 0;
4517 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004518
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004519 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004520
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004521 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004522 int cpu, buf_size_same;
4523 unsigned long size;
4524
4525 size = 0;
4526 buf_size_same = 1;
4527 /* check if all cpu sizes are same */
4528 for_each_tracing_cpu(cpu) {
4529 /* fill in the size from first enabled cpu */
4530 if (size == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004531 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4532 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004533 buf_size_same = 0;
4534 break;
4535 }
4536 }
4537
4538 if (buf_size_same) {
4539 if (!ring_buffer_expanded)
4540 r = sprintf(buf, "%lu (expanded: %lu)\n",
4541 size >> 10,
4542 trace_buf_size >> 10);
4543 else
4544 r = sprintf(buf, "%lu\n", size >> 10);
4545 } else
4546 r = sprintf(buf, "X\n");
4547 } else
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004548 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004549
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004550 mutex_unlock(&trace_types_lock);
4551
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004552 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4553 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004554}
4555
4556static ssize_t
4557tracing_entries_write(struct file *filp, const char __user *ubuf,
4558 size_t cnt, loff_t *ppos)
4559{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004560 struct inode *inode = file_inode(filp);
4561 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004562 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004563 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004564
Peter Huewe22fe9b52011-06-07 21:58:27 +02004565 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4566 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004567 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004568
4569 /* must have at least 1 entry */
4570 if (!val)
4571 return -EINVAL;
4572
Steven Rostedt1696b2b2008-11-13 00:09:35 -05004573 /* value is in KB */
4574 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004575 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004576 if (ret < 0)
4577 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004578
Jiri Olsacf8517c2009-10-23 19:36:16 -04004579 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004580
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004581 return cnt;
4582}
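
/*
 * Illustrative usage, assuming the usual debugfs mount point (values
 * are in KB, as noted above; the per_cpu files resize a single CPU):
 *
 *	# echo 1408 > /sys/kernel/debug/tracing/buffer_size_kb
 *	# echo 512 > /sys/kernel/debug/tracing/per_cpu/cpu0/buffer_size_kb
 */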
Steven Rostedtbf5e6512008-11-10 21:46:00 -05004583
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004584static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004585tracing_total_entries_read(struct file *filp, char __user *ubuf,
4586 size_t cnt, loff_t *ppos)
4587{
4588 struct trace_array *tr = filp->private_data;
4589 char buf[64];
4590 int r, cpu;
4591 unsigned long size = 0, expanded_size = 0;
4592
4593 mutex_lock(&trace_types_lock);
4594 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004595 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004596 if (!ring_buffer_expanded)
4597 expanded_size += trace_buf_size >> 10;
4598 }
4599 if (ring_buffer_expanded)
4600 r = sprintf(buf, "%lu\n", size);
4601 else
4602 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
4603 mutex_unlock(&trace_types_lock);
4604
4605 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4606}
4607
4608static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004609tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
4610 size_t cnt, loff_t *ppos)
4611{
4612 /*
4613	 * There is no need to read what the user has written; this function
4614	 * just makes sure that there is no error when "echo" is used.
4615 */
4616
4617 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004618
4619 return cnt;
4620}
4621
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004622static int
4623tracing_free_buffer_release(struct inode *inode, struct file *filp)
4624{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004625 struct trace_array *tr = inode->i_private;
4626
Steven Rostedtcf30cf62011-06-14 22:44:07 -04004627	 /* disable tracing? */
4628 if (trace_flags & TRACE_ITER_STOP_ON_FREE)
Alexander Z Lam711e1242013-08-02 18:36:15 -07004629 tracer_tracing_off(tr);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004630 /* resize the ring buffer to 0 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004631 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004632
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004633 trace_array_put(tr);
4634
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004635 return 0;
4636}
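
/*
 * Illustrative usage, assuming the usual debugfs mount point: the
 * write itself is a no-op; the buffers are freed on the close that
 * follows, optionally turning tracing off first (see above):
 *
 *	# echo > /sys/kernel/debug/tracing/free_buffer
 */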
4637
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004638static ssize_t
4639tracing_mark_write(struct file *filp, const char __user *ubuf,
4640 size_t cnt, loff_t *fpos)
4641{
Steven Rostedtd696b582011-09-22 11:50:27 -04004642 unsigned long addr = (unsigned long)ubuf;
Alexander Z Lam2d716192013-07-01 15:31:24 -07004643 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04004644 struct ring_buffer_event *event;
4645 struct ring_buffer *buffer;
4646 struct print_entry *entry;
4647 unsigned long irq_flags;
4648 struct page *pages[2];
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004649 void *map_page[2];
Steven Rostedtd696b582011-09-22 11:50:27 -04004650 int nr_pages = 1;
4651 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04004652 int offset;
4653 int size;
4654 int len;
4655 int ret;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004656 int i;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004657
Steven Rostedtc76f0692008-11-07 22:36:02 -05004658 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004659 return -EINVAL;
4660
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07004661 if (!(trace_flags & TRACE_ITER_MARKERS))
4662 return -EINVAL;
4663
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004664 if (cnt > TRACE_BUF_SIZE)
4665 cnt = TRACE_BUF_SIZE;
4666
Steven Rostedtd696b582011-09-22 11:50:27 -04004667 /*
4668 * Userspace is injecting traces into the kernel trace buffer.
4669	 * We want to be as non-intrusive as possible.
4670 * To do so, we do not want to allocate any special buffers
4671 * or take any locks, but instead write the userspace data
4672 * straight into the ring buffer.
4673 *
4674	 * First we need to pin the userspace buffer into memory.
4675	 * It most likely already is, because the task just referenced it,
4676	 * but there's no guarantee. By using get_user_pages_fast()
4677 * and kmap_atomic/kunmap_atomic() we can get access to the
4678 * pages directly. We then write the data directly into the
4679 * ring buffer.
4680 */
4681 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004682
Steven Rostedtd696b582011-09-22 11:50:27 -04004683 /* check if we cross pages */
4684 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
4685 nr_pages = 2;
4686
4687 offset = addr & (PAGE_SIZE - 1);
4688 addr &= PAGE_MASK;
4689
4690 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
4691 if (ret < nr_pages) {
4692 while (--ret >= 0)
4693 put_page(pages[ret]);
4694 written = -EFAULT;
4695 goto out;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004696 }
4697
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004698 for (i = 0; i < nr_pages; i++)
4699 map_page[i] = kmap_atomic(pages[i]);
Steven Rostedtd696b582011-09-22 11:50:27 -04004700
4701 local_save_flags(irq_flags);
4702 size = sizeof(*entry) + cnt + 2; /* possible \n added */
Alexander Z Lam2d716192013-07-01 15:31:24 -07004703 buffer = tr->trace_buffer.buffer;
Steven Rostedtd696b582011-09-22 11:50:27 -04004704 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
4705 irq_flags, preempt_count());
4706 if (!event) {
4707 /* Ring buffer disabled, return as if not open for write */
4708 written = -EBADF;
4709 goto out_unlock;
4710 }
4711
4712 entry = ring_buffer_event_data(event);
4713 entry->ip = _THIS_IP_;
4714
4715 if (nr_pages == 2) {
4716 len = PAGE_SIZE - offset;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004717 memcpy(&entry->buf, map_page[0] + offset, len);
4718 memcpy(&entry->buf[len], map_page[1], cnt - len);
Steven Rostedtd696b582011-09-22 11:50:27 -04004719 } else
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004720 memcpy(&entry->buf, map_page[0] + offset, cnt);
Steven Rostedtd696b582011-09-22 11:50:27 -04004721
4722 if (entry->buf[cnt - 1] != '\n') {
4723 entry->buf[cnt] = '\n';
4724 entry->buf[cnt + 1] = '\0';
4725 } else
4726 entry->buf[cnt] = '\0';
4727
Steven Rostedt7ffbd482012-10-11 12:14:25 -04004728 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04004729
4730 written = cnt;
4731
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02004732 *fpos += written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004733
Steven Rostedtd696b582011-09-22 11:50:27 -04004734 out_unlock:
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004735 for (i = 0; i < nr_pages; i++){
4736 kunmap_atomic(map_page[i]);
4737 put_page(pages[i]);
4738 }
Steven Rostedtd696b582011-09-22 11:50:27 -04004739 out:
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02004740 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004741}
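
/*
 * Illustrative usage, assuming the usual debugfs mount point: the
 * written text shows up in the trace as a print entry, with a
 * newline appended if one was missing (see above):
 *
 *	# echo "about to reproduce the bug" > /sys/kernel/debug/tracing/trace_marker
 */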
4742
Li Zefan13f16d22009-12-08 11:16:11 +08004743static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08004744{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004745 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08004746 int i;
4747
4748 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08004749 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08004750 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004751 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
4752 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08004753 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08004754
Li Zefan13f16d22009-12-08 11:16:11 +08004755 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08004756}
4757
Steven Rostedte1e232c2014-02-10 23:38:46 -05004758static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
Zhaolei5079f322009-08-25 16:12:56 +08004759{
Zhaolei5079f322009-08-25 16:12:56 +08004760 int i;
4761
Zhaolei5079f322009-08-25 16:12:56 +08004762 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
4763 if (strcmp(trace_clocks[i].name, clockstr) == 0)
4764 break;
4765 }
4766 if (i == ARRAY_SIZE(trace_clocks))
4767 return -EINVAL;
4768
Zhaolei5079f322009-08-25 16:12:56 +08004769 mutex_lock(&trace_types_lock);
4770
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004771 tr->clock_id = i;
4772
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004773 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08004774
David Sharp60303ed2012-10-11 16:27:52 -07004775 /*
4776	 * The new clock may not be consistent with the previous clock.
4777 * Reset the buffer so that it doesn't have incomparable timestamps.
4778 */
Alexander Z Lam94571582013-08-02 18:36:16 -07004779 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004780
4781#ifdef CONFIG_TRACER_MAX_TRACE
4782 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
4783 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
Alexander Z Lam94571582013-08-02 18:36:16 -07004784 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004785#endif
David Sharp60303ed2012-10-11 16:27:52 -07004786
Zhaolei5079f322009-08-25 16:12:56 +08004787 mutex_unlock(&trace_types_lock);
4788
Steven Rostedte1e232c2014-02-10 23:38:46 -05004789 return 0;
4790}
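
/*
 * Illustrative usage, assuming the usual debugfs mount point (the
 * exact list depends on trace_clocks[]): reading shows the current
 * clock in brackets, and writing a listed name switches to it.
 *
 *	# cat /sys/kernel/debug/tracing/trace_clock
 *	[local] global counter ...
 *	# echo global > /sys/kernel/debug/tracing/trace_clock
 */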
4791
4792static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
4793 size_t cnt, loff_t *fpos)
4794{
4795 struct seq_file *m = filp->private_data;
4796 struct trace_array *tr = m->private;
4797 char buf[64];
4798 const char *clockstr;
4799 int ret;
4800
4801 if (cnt >= sizeof(buf))
4802 return -EINVAL;
4803
4804 if (copy_from_user(&buf, ubuf, cnt))
4805 return -EFAULT;
4806
4807 buf[cnt] = 0;
4808
4809 clockstr = strstrip(buf);
4810
4811 ret = tracing_set_clock(tr, clockstr);
4812 if (ret)
4813 return ret;
4814
Zhaolei5079f322009-08-25 16:12:56 +08004815 *fpos += cnt;
4816
4817 return cnt;
4818}
4819
Li Zefan13f16d22009-12-08 11:16:11 +08004820static int tracing_clock_open(struct inode *inode, struct file *file)
4821{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004822 struct trace_array *tr = inode->i_private;
4823 int ret;
4824
Li Zefan13f16d22009-12-08 11:16:11 +08004825 if (tracing_disabled)
4826 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004827
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004828 if (trace_array_get(tr))
4829 return -ENODEV;
4830
4831 ret = single_open(file, tracing_clock_show, inode->i_private);
4832 if (ret < 0)
4833 trace_array_put(tr);
4834
4835 return ret;
Li Zefan13f16d22009-12-08 11:16:11 +08004836}
4837
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05004838struct ftrace_buffer_info {
4839 struct trace_iterator iter;
4840 void *spare;
4841 unsigned int read;
4842};
4843
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004844#ifdef CONFIG_TRACER_SNAPSHOT
4845static int tracing_snapshot_open(struct inode *inode, struct file *file)
4846{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004847 struct trace_array *tr = inode->i_private;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004848 struct trace_iterator *iter;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004849 struct seq_file *m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004850 int ret = 0;
4851
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004852 if (trace_array_get(tr) < 0)
4853 return -ENODEV;
4854
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004855 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02004856 iter = __tracing_open(inode, file, true);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004857 if (IS_ERR(iter))
4858 ret = PTR_ERR(iter);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004859 } else {
4860 /* Writes still need the seq_file to hold the private data */
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004861 ret = -ENOMEM;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004862 m = kzalloc(sizeof(*m), GFP_KERNEL);
4863 if (!m)
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004864 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004865 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4866 if (!iter) {
4867 kfree(m);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004868 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004869 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004870 ret = 0;
4871
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004872 iter->tr = tr;
Oleg Nesterov6484c712013-07-23 17:26:10 +02004873 iter->trace_buffer = &tr->max_buffer;
4874 iter->cpu_file = tracing_get_cpu(inode);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004875 m->private = iter;
4876 file->private_data = m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004877 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004878out:
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004879 if (ret < 0)
4880 trace_array_put(tr);
4881
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004882 return ret;
4883}
4884
4885static ssize_t
4886tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
4887 loff_t *ppos)
4888{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004889 struct seq_file *m = filp->private_data;
4890 struct trace_iterator *iter = m->private;
4891 struct trace_array *tr = iter->tr;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004892 unsigned long val;
4893 int ret;
4894
4895 ret = tracing_update_buffers();
4896 if (ret < 0)
4897 return ret;
4898
4899 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4900 if (ret)
4901 return ret;
4902
4903 mutex_lock(&trace_types_lock);
4904
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004905 if (tr->current_trace->use_max_tr) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004906 ret = -EBUSY;
4907 goto out;
4908 }
4909
4910 switch (val) {
4911 case 0:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004912 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4913 ret = -EINVAL;
4914 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004915 }
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004916 if (tr->allocated_snapshot)
4917 free_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004918 break;
4919 case 1:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004920/* Only allow per-cpu swap if the ring buffer supports it */
4921#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
4922 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4923 ret = -EINVAL;
4924 break;
4925 }
4926#endif
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05004927 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004928 ret = alloc_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004929 if (ret < 0)
4930 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004931 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004932 local_irq_disable();
4933 /* Now, we're going to swap */
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004934 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05004935 update_max_tr(tr, current, smp_processor_id());
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004936 else
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05004937 update_max_tr_single(tr, current, iter->cpu_file);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004938 local_irq_enable();
4939 break;
4940 default:
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05004941 if (tr->allocated_snapshot) {
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004942 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4943 tracing_reset_online_cpus(&tr->max_buffer);
4944 else
4945 tracing_reset(&tr->max_buffer, iter->cpu_file);
4946 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004947 break;
4948 }
4949
4950 if (ret >= 0) {
4951 *ppos += cnt;
4952 ret = cnt;
4953 }
4954out:
4955 mutex_unlock(&trace_types_lock);
4956 return ret;
4957}
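/*
 * Write semantics of the "snapshot" file as implemented by the switch
 * above:
 *
 *   echo 0 > snapshot  - free the snapshot buffer (top-level file only;
 *                        the per-cpu files reject it with -EINVAL)
 *   echo 1 > snapshot  - allocate the buffer if necessary, then swap it
 *                        with the live buffer, capturing the snapshot
 *   echo 2 > snapshot  - any other value simply clears the snapshot
 *                        buffer without freeing it
 */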
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004958
4959static int tracing_snapshot_release(struct inode *inode, struct file *file)
4960{
4961 struct seq_file *m = file->private_data;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004962 int ret;
4963
4964 ret = tracing_release(inode, file);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004965
4966 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004967 return ret;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004968
4969 /* If write only, the seq_file is just a stub */
4970 if (m)
4971 kfree(m->private);
4972 kfree(m);
4973
4974 return 0;
4975}
4976
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05004977static int tracing_buffers_open(struct inode *inode, struct file *filp);
4978static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
4979 size_t count, loff_t *ppos);
4980static int tracing_buffers_release(struct inode *inode, struct file *file);
4981static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
4982 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
4983
4984static int snapshot_raw_open(struct inode *inode, struct file *filp)
4985{
4986 struct ftrace_buffer_info *info;
4987 int ret;
4988
4989 ret = tracing_buffers_open(inode, filp);
4990 if (ret < 0)
4991 return ret;
4992
4993 info = filp->private_data;
4994
4995 if (info->iter.trace->use_max_tr) {
4996 tracing_buffers_release(inode, filp);
4997 return -EBUSY;
4998 }
4999
5000 info->iter.snapshot = true;
5001 info->iter.trace_buffer = &info->iter.tr->max_buffer;
5002
5003 return ret;
5004}
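/*
 * "snapshot_raw" reuses the trace_pipe_raw machinery wholesale: it
 * opens through tracing_buffers_open() and then redirects the iterator
 * at tr->max_buffer, so the snapshotted pages can be read in the same
 * binary page format as the live buffer.
 */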
5005
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005006#endif /* CONFIG_TRACER_SNAPSHOT */
5007
5008
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005009static const struct file_operations tracing_max_lat_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005010 .open = tracing_open_generic,
5011 .read = tracing_max_lat_read,
5012 .write = tracing_max_lat_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005013 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005014};
5015
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005016static const struct file_operations set_tracer_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005017 .open = tracing_open_generic,
5018 .read = tracing_set_trace_read,
5019 .write = tracing_set_trace_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005020 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005021};
5022
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005023static const struct file_operations tracing_pipe_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005024 .open = tracing_open_pipe,
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005025 .poll = tracing_poll_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005026 .read = tracing_read_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005027 .splice_read = tracing_splice_read_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005028 .release = tracing_release_pipe,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005029 .llseek = no_llseek,
Steven Rostedtb3806b42008-05-12 21:20:46 +02005030};
5031
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005032static const struct file_operations tracing_entries_fops = {
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005033 .open = tracing_open_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02005034 .read = tracing_entries_read,
5035 .write = tracing_entries_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005036 .llseek = generic_file_llseek,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005037 .release = tracing_release_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02005038};
5039
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005040static const struct file_operations tracing_total_entries_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005041 .open = tracing_open_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005042 .read = tracing_total_entries_read,
5043 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005044 .release = tracing_release_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005045};
5046
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005047static const struct file_operations tracing_free_buffer_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005048 .open = tracing_open_generic_tr,
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005049 .write = tracing_free_buffer_write,
5050 .release = tracing_free_buffer_release,
5051};
5052
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005053static const struct file_operations tracing_mark_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005054 .open = tracing_open_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005055 .write = tracing_mark_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005056 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005057 .release = tracing_release_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005058};
5059
Zhaolei5079f322009-08-25 16:12:56 +08005060static const struct file_operations trace_clock_fops = {
Li Zefan13f16d22009-12-08 11:16:11 +08005061 .open = tracing_clock_open,
5062 .read = seq_read,
5063 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005064 .release = tracing_single_release_tr,
Zhaolei5079f322009-08-25 16:12:56 +08005065 .write = tracing_clock_write,
5066};
5067
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005068#ifdef CONFIG_TRACER_SNAPSHOT
5069static const struct file_operations snapshot_fops = {
5070 .open = tracing_snapshot_open,
5071 .read = seq_read,
5072 .write = tracing_snapshot_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05005073 .llseek = tracing_lseek,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005074 .release = tracing_snapshot_release,
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005075};
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005076
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005077static const struct file_operations snapshot_raw_fops = {
5078 .open = snapshot_raw_open,
5079 .read = tracing_buffers_read,
5080 .release = tracing_buffers_release,
5081 .splice_read = tracing_buffers_splice_read,
5082 .llseek = no_llseek,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005083};
5084
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005085#endif /* CONFIG_TRACER_SNAPSHOT */
5086
Steven Rostedt2cadf912008-12-01 22:20:19 -05005087static int tracing_buffers_open(struct inode *inode, struct file *filp)
5088{
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005089 struct trace_array *tr = inode->i_private;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005090 struct ftrace_buffer_info *info;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005091 int ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005092
5093 if (tracing_disabled)
5094 return -ENODEV;
5095
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005096 if (trace_array_get(tr) < 0)
5097 return -ENODEV;
5098
Steven Rostedt2cadf912008-12-01 22:20:19 -05005099 info = kzalloc(sizeof(*info), GFP_KERNEL);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005100 if (!info) {
5101 trace_array_put(tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005102 return -ENOMEM;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005103 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05005104
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005105 mutex_lock(&trace_types_lock);
5106
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005107 info->iter.tr = tr;
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005108 info->iter.cpu_file = tracing_get_cpu(inode);
Steven Rostedtb6273442013-02-28 13:44:11 -05005109 info->iter.trace = tr->current_trace;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005110 info->iter.trace_buffer = &tr->trace_buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005111 info->spare = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005112 /* Force reading ring buffer for first read */
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005113 info->read = (unsigned int)-1;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005114
5115 filp->private_data = info;
5116
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005117 mutex_unlock(&trace_types_lock);
5118
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005119 ret = nonseekable_open(inode, filp);
5120 if (ret < 0)
5121 trace_array_put(tr);
5122
5123 return ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005124}
5125
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005126static unsigned int
5127tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5128{
5129 struct ftrace_buffer_info *info = filp->private_data;
5130 struct trace_iterator *iter = &info->iter;
5131
5132 return trace_poll(iter, filp, poll_table);
5133}
5134
Steven Rostedt2cadf912008-12-01 22:20:19 -05005135static ssize_t
5136tracing_buffers_read(struct file *filp, char __user *ubuf,
5137 size_t count, loff_t *ppos)
5138{
5139 struct ftrace_buffer_info *info = filp->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005140 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005141 ssize_t ret;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005142 ssize_t size;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005143
Steven Rostedt2dc5d122009-03-04 19:10:05 -05005144 if (!count)
5145 return 0;
5146
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005147 mutex_lock(&trace_types_lock);
5148
5149#ifdef CONFIG_TRACER_MAX_TRACE
5150 if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5151 size = -EBUSY;
5152 goto out_unlock;
5153 }
5154#endif
5155
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005156 if (!info->spare)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005157 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5158 iter->cpu_file);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005159 size = -ENOMEM;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005160 if (!info->spare)
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005161 goto out_unlock;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005162
Steven Rostedt2cadf912008-12-01 22:20:19 -05005163 /* Do we have previous read data to read? */
5164 if (info->read < PAGE_SIZE)
5165 goto read;
5166
Steven Rostedtb6273442013-02-28 13:44:11 -05005167 again:
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005168 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005169 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005170 &info->spare,
5171 count,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005172 iter->cpu_file, 0);
5173 trace_access_unlock(iter->cpu_file);
Steven Rostedtb6273442013-02-28 13:44:11 -05005174
5175 if (ret < 0) {
5176 if (trace_empty(iter)) {
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005177 if ((filp->f_flags & O_NONBLOCK)) {
5178 size = -EAGAIN;
5179 goto out_unlock;
5180 }
5181 mutex_unlock(&trace_types_lock);
Steven Rostedtb6273442013-02-28 13:44:11 -05005182 iter->trace->wait_pipe(iter);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005183 mutex_lock(&trace_types_lock);
5184 if (signal_pending(current)) {
5185 size = -EINTR;
5186 goto out_unlock;
5187 }
Steven Rostedtb6273442013-02-28 13:44:11 -05005188 goto again;
5189 }
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005190 size = 0;
5191 goto out_unlock;
Steven Rostedtb6273442013-02-28 13:44:11 -05005192 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05005193
Steven Rostedt436fc282011-10-14 10:44:25 -04005194 info->read = 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05005195 read:
Steven Rostedt2cadf912008-12-01 22:20:19 -05005196 size = PAGE_SIZE - info->read;
5197 if (size > count)
5198 size = count;
5199
5200 ret = copy_to_user(ubuf, info->spare + info->read, size);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005201 if (ret == size) {
5202 size = -EFAULT;
5203 goto out_unlock;
5204 }
Steven Rostedt2dc5d122009-03-04 19:10:05 -05005205 size -= ret;
5206
Steven Rostedt2cadf912008-12-01 22:20:19 -05005207 *ppos += size;
5208 info->read += size;
5209
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005210 out_unlock:
5211 mutex_unlock(&trace_types_lock);
5212
Steven Rostedt2cadf912008-12-01 22:20:19 -05005213 return size;
5214}
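/*
 * Sketch of the read path above: one "spare" page is borrowed from the
 * ring buffer, ring_buffer_read_page() fills it a full page of events
 * at a time, and successive reads drain that page (info->read is the
 * offset) before another page is pulled. What reaches userspace is the
 * raw binary sub-buffer format, not text; callers are expected to know
 * the ring buffer page layout to parse it.
 */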
5215
5216static int tracing_buffers_release(struct inode *inode, struct file *file)
5217{
5218 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005219 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005220
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005221 mutex_lock(&trace_types_lock);
5222
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005223 __trace_array_put(iter->tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005224
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005225 if (info->spare)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005226 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005227 kfree(info);
5228
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005229 mutex_unlock(&trace_types_lock);
5230
Steven Rostedt2cadf912008-12-01 22:20:19 -05005231 return 0;
5232}
5233
5234struct buffer_ref {
5235 struct ring_buffer *buffer;
5236 void *page;
5237 int ref;
5238};
5239
5240static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5241 struct pipe_buffer *buf)
5242{
5243 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5244
5245 if (--ref->ref)
5246 return;
5247
5248 ring_buffer_free_read_page(ref->buffer, ref->page);
5249 kfree(ref);
5250 buf->private = 0;
5251}
5252
Steven Rostedt2cadf912008-12-01 22:20:19 -05005253static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5254 struct pipe_buffer *buf)
5255{
5256 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5257
5258 ref->ref++;
5259}
5260
5261/* Pipe buffer operations for a buffer. */
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08005262static const struct pipe_buf_operations buffer_pipe_buf_ops = {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005263 .can_merge = 0,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005264 .confirm = generic_pipe_buf_confirm,
5265 .release = buffer_pipe_buf_release,
Masami Hiramatsud55cb6c2012-08-09 21:31:10 +09005266 .steal = generic_pipe_buf_steal,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005267 .get = buffer_pipe_buf_get,
5268};
5269
5270/*
5271 * Callback from splice_to_pipe(), used to release any pages left in
5272 * the spd if we errored out while filling the pipe.
5273 */
5274static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5275{
5276 struct buffer_ref *ref =
5277 (struct buffer_ref *)spd->partial[i].private;
5278
5279 if (--ref->ref)
5280 return;
5281
5282 ring_buffer_free_read_page(ref->buffer, ref->page);
5283 kfree(ref);
5284 spd->partial[i].private = 0;
5285}
5286
5287static ssize_t
5288tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5289 struct pipe_inode_info *pipe, size_t len,
5290 unsigned int flags)
5291{
5292 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005293 struct trace_iterator *iter = &info->iter;
Jens Axboe35f3d142010-05-20 10:43:18 +02005294 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5295 struct page *pages_def[PIPE_DEF_BUFFERS];
Steven Rostedt2cadf912008-12-01 22:20:19 -05005296 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02005297 .pages = pages_def,
5298 .partial = partial_def,
Eric Dumazet047fe362012-06-12 15:24:40 +02005299 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005300 .flags = flags,
5301 .ops = &buffer_pipe_buf_ops,
5302 .spd_release = buffer_spd_release,
5303 };
5304 struct buffer_ref *ref;
Steven Rostedt93459c62009-04-29 00:23:13 -04005305 int entries, size, i;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005306 ssize_t ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005307
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005308 mutex_lock(&trace_types_lock);
5309
5310#ifdef CONFIG_TRACER_MAX_TRACE
5311 if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5312 ret = -EBUSY;
5313 goto out;
5314 }
5315#endif
5316
5317 if (splice_grow_spd(pipe, &spd)) {
5318 ret = -ENOMEM;
5319 goto out;
5320 }
Jens Axboe35f3d142010-05-20 10:43:18 +02005321
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005322 if (*ppos & (PAGE_SIZE - 1)) {
Jens Axboe35f3d142010-05-20 10:43:18 +02005323 ret = -EINVAL;
5324 goto out;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005325 }
5326
5327 if (len & (PAGE_SIZE - 1)) {
Jens Axboe35f3d142010-05-20 10:43:18 +02005328 if (len < PAGE_SIZE) {
5329 ret = -EINVAL;
5330 goto out;
5331 }
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005332 len &= PAGE_MASK;
5333 }
5334
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005335 again:
5336 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005337 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt93459c62009-04-29 00:23:13 -04005338
Al Viroa786c062014-04-11 12:01:03 -04005339 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005340 struct page *page;
5341 int r;
5342
5343 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
5344 if (!ref)
5345 break;
5346
Steven Rostedt7267fa62009-04-29 00:16:21 -04005347 ref->ref = 1;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005348 ref->buffer = iter->trace_buffer->buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005349 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005350 if (!ref->page) {
5351 kfree(ref);
5352 break;
5353 }
5354
5355 r = ring_buffer_read_page(ref->buffer, &ref->page,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005356 len, iter->cpu_file, 1);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005357 if (r < 0) {
Vaibhav Nagarnaik7ea59062011-05-03 17:56:42 -07005358 ring_buffer_free_read_page(ref->buffer, ref->page);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005359 kfree(ref);
5360 break;
5361 }
5362
5363 /*
5364	 * Zero out any leftover data in the page, since it is
5365	 * going out to user land.
5366 */
5367 size = ring_buffer_page_len(ref->page);
5368 if (size < PAGE_SIZE)
5369 memset(ref->page + size, 0, PAGE_SIZE - size);
5370
5371 page = virt_to_page(ref->page);
5372
5373 spd.pages[i] = page;
5374 spd.partial[i].len = PAGE_SIZE;
5375 spd.partial[i].offset = 0;
5376 spd.partial[i].private = (unsigned long)ref;
5377 spd.nr_pages++;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005378 *ppos += PAGE_SIZE;
Steven Rostedt93459c62009-04-29 00:23:13 -04005379
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005380 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005381 }
5382
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005383 trace_access_unlock(iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005384 spd.nr_pages = i;
5385
5386 /* did we read anything? */
5387 if (!spd.nr_pages) {
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005388 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005389 ret = -EAGAIN;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005390 goto out;
5391 }
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005392 mutex_unlock(&trace_types_lock);
Steven Rostedtb6273442013-02-28 13:44:11 -05005393 iter->trace->wait_pipe(iter);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005394 mutex_lock(&trace_types_lock);
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005395 if (signal_pending(current)) {
5396 ret = -EINTR;
5397 goto out;
5398 }
5399 goto again;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005400 }
5401
5402 ret = splice_to_pipe(pipe, &spd);
Eric Dumazet047fe362012-06-12 15:24:40 +02005403 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02005404out:
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005405 mutex_unlock(&trace_types_lock);
5406
Steven Rostedt2cadf912008-12-01 22:20:19 -05005407 return ret;
5408}
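/*
 * Design note: the splice path avoids the copy_to_user() done by
 * tracing_buffers_read(). Each ring buffer page is wrapped in a
 * refcounted buffer_ref and linked into the pipe; the page goes back to
 * the ring buffer only when the last pipe reference is dropped (see
 * buffer_pipe_buf_release() above). Both *ppos and len must be page
 * aligned, as checked at the top of the function.
 */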
5409
5410static const struct file_operations tracing_buffers_fops = {
5411 .open = tracing_buffers_open,
5412 .read = tracing_buffers_read,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005413 .poll = tracing_buffers_poll,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005414 .release = tracing_buffers_release,
5415 .splice_read = tracing_buffers_splice_read,
5416 .llseek = no_llseek,
5417};
5418
Steven Rostedtc8d77182009-04-29 18:03:45 -04005419static ssize_t
5420tracing_stats_read(struct file *filp, char __user *ubuf,
5421 size_t count, loff_t *ppos)
5422{
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005423 struct inode *inode = file_inode(filp);
5424 struct trace_array *tr = inode->i_private;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005425 struct trace_buffer *trace_buf = &tr->trace_buffer;
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005426 int cpu = tracing_get_cpu(inode);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005427 struct trace_seq *s;
5428 unsigned long cnt;
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005429 unsigned long long t;
5430 unsigned long usec_rem;
Steven Rostedtc8d77182009-04-29 18:03:45 -04005431
Li Zefane4f2d102009-06-15 10:57:28 +08005432 s = kmalloc(sizeof(*s), GFP_KERNEL);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005433 if (!s)
Roel Kluina6463652009-11-11 22:26:35 +01005434 return -ENOMEM;
Steven Rostedtc8d77182009-04-29 18:03:45 -04005435
5436 trace_seq_init(s);
5437
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005438 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005439 trace_seq_printf(s, "entries: %ld\n", cnt);
5440
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005441 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005442 trace_seq_printf(s, "overrun: %ld\n", cnt);
5443
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005444 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005445 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5446
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005447 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005448 trace_seq_printf(s, "bytes: %ld\n", cnt);
5449
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09005450 if (trace_clocks[tr->clock_id].in_ns) {
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005451 /* local or global for trace_clock */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005452 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005453 usec_rem = do_div(t, USEC_PER_SEC);
5454 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5455 t, usec_rem);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005456
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005457 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005458 usec_rem = do_div(t, USEC_PER_SEC);
5459 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5460 } else {
5461 /* counter or tsc mode for trace_clock */
5462 trace_seq_printf(s, "oldest event ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005463 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005464
5465 trace_seq_printf(s, "now ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005466 ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005467 }
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005468
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005469 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
Slava Pestov884bfe82011-07-15 14:23:58 -07005470 trace_seq_printf(s, "dropped events: %ld\n", cnt);
5471
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005472 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
Steven Rostedt (Red Hat)ad964702013-01-29 17:45:49 -05005473 trace_seq_printf(s, "read events: %ld\n", cnt);
5474
Steven Rostedtc8d77182009-04-29 18:03:45 -04005475 count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
5476
5477 kfree(s);
5478
5479 return count;
5480}
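/*
 * Illustrative per-cpu "stats" output (all values made up; the "ts"
 * lines switch to the plain-integer form for counter/tsc clocks):
 *
 *   entries: 129
 *   overrun: 0
 *   commit overrun: 0
 *   bytes: 5432
 *   oldest event ts:  2351.800239
 *   now ts:  2357.122426
 *   dropped events: 0
 *   read events: 107
 */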
5481
5482static const struct file_operations tracing_stats_fops = {
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005483 .open = tracing_open_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04005484 .read = tracing_stats_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005485 .llseek = generic_file_llseek,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005486 .release = tracing_release_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04005487};
5488
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005489#ifdef CONFIG_DYNAMIC_FTRACE
5490
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005491int __weak ftrace_arch_read_dyn_info(char *buf, int size)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005492{
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005493 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005494}
5495
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005496static ssize_t
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005497tracing_read_dyn_info(struct file *filp, char __user *ubuf,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005498 size_t cnt, loff_t *ppos)
5499{
Steven Rostedta26a2a22008-10-31 00:03:22 -04005500 static char ftrace_dyn_info_buffer[1024];
5501 static DEFINE_MUTEX(dyn_info_mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005502 unsigned long *p = filp->private_data;
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005503 char *buf = ftrace_dyn_info_buffer;
Steven Rostedta26a2a22008-10-31 00:03:22 -04005504 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005505 int r;
5506
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005507 mutex_lock(&dyn_info_mutex);
5508 r = sprintf(buf, "%ld ", *p);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005509
Steven Rostedta26a2a22008-10-31 00:03:22 -04005510 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005511 buf[r++] = '\n';
5512
5513 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5514
5515 mutex_unlock(&dyn_info_mutex);
5516
5517 return r;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005518}
5519
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005520static const struct file_operations tracing_dyn_info_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005521 .open = tracing_open_generic,
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005522 .read = tracing_read_dyn_info,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005523 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005524};
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005525#endif /* CONFIG_DYNAMIC_FTRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005526
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005527#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5528static void
5529ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005530{
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005531 tracing_snapshot();
5532}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005533
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005534static void
5535ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5536{
5537	unsigned long *count = (unsigned long *)data;
5538
5539 if (!*count)
5540 return;
5541
5542 if (*count != -1)
5543 (*count)--;
5544
5545 tracing_snapshot();
5546}
5547
5548static int
5549ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5550 struct ftrace_probe_ops *ops, void *data)
5551{
5552 long count = (long)data;
5553
5554 seq_printf(m, "%ps:", (void *)ip);
5555
5556	seq_puts(m, "snapshot");
5557
5558 if (count == -1)
5559		seq_puts(m, ":unlimited\n");
5560 else
5561 seq_printf(m, ":count=%ld\n", count);
5562
5563 return 0;
5564}
5565
5566static struct ftrace_probe_ops snapshot_probe_ops = {
5567 .func = ftrace_snapshot,
5568 .print = ftrace_snapshot_print,
5569};
5570
5571static struct ftrace_probe_ops snapshot_count_probe_ops = {
5572 .func = ftrace_count_snapshot,
5573 .print = ftrace_snapshot_print,
5574};
5575
5576static int
5577ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
5578 char *glob, char *cmd, char *param, int enable)
5579{
5580 struct ftrace_probe_ops *ops;
5581 void *count = (void *)-1;
5582 char *number;
5583 int ret;
5584
5585 /* hash funcs only work with set_ftrace_filter */
5586 if (!enable)
5587 return -EINVAL;
5588
5589 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
5590
5591 if (glob[0] == '!') {
5592 unregister_ftrace_function_probe_func(glob+1, ops);
5593 return 0;
5594 }
5595
5596 if (!param)
5597 goto out_reg;
5598
5599 number = strsep(&param, ":");
5600
5601 if (!strlen(number))
5602 goto out_reg;
5603
5604 /*
5605 * We use the callback data field (which is a pointer)
5606 * as our counter.
5607 */
5608 ret = kstrtoul(number, 0, (unsigned long *)&count);
5609 if (ret)
5610 return ret;
5611
5612 out_reg:
5613 ret = register_ftrace_function_probe(glob, ops, count);
5614
5615 if (ret >= 0)
5616 alloc_snapshot(&global_trace);
5617
5618 return ret < 0 ? ret : 0;
5619}
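/*
 * Hedged usage sketch of the "snapshot" function command parsed above
 * (the traced function name is only an example):
 *
 *   # echo 'do_fork:snapshot:5' > set_ftrace_filter   - snapshot on the
 *     first five hits of do_fork
 *   # echo 'do_fork:snapshot' > set_ftrace_filter     - snapshot on
 *     every hit (count defaults to -1, i.e. unlimited)
 *   # echo '!do_fork:snapshot' >> set_ftrace_filter   - remove the probe
 *
 * Registering the command also allocates the snapshot buffer up front,
 * via the alloc_snapshot() call in out_reg.
 */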
5620
5621static struct ftrace_func_command ftrace_snapshot_cmd = {
5622 .name = "snapshot",
5623 .func = ftrace_trace_snapshot_callback,
5624};
5625
Tom Zanussi38de93a2013-10-24 08:34:18 -05005626static __init int register_snapshot_cmd(void)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005627{
5628 return register_ftrace_command(&ftrace_snapshot_cmd);
5629}
5630#else
Tom Zanussi38de93a2013-10-24 08:34:18 -05005631static inline __init int register_snapshot_cmd(void) { return 0; }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005632#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005633
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005634struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005635{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005636 if (tr->dir)
5637 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005638
Frederic Weisbecker3e1f60b2009-03-22 23:10:45 +01005639 if (!debugfs_initialized())
5640 return NULL;
5641
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005642 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
5643 tr->dir = debugfs_create_dir("tracing", NULL);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005644
zhangwei(Jovi)687c8782013-03-11 15:13:29 +08005645 if (!tr->dir)
5646 pr_warn_once("Could not create debugfs directory 'tracing'\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005647
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005648 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005649}
5650
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005651struct dentry *tracing_init_dentry(void)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005652{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005653 return tracing_init_dentry_tr(&global_trace);
5654}
5655
5656static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
5657{
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005658 struct dentry *d_tracer;
5659
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005660 if (tr->percpu_dir)
5661 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005662
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005663 d_tracer = tracing_init_dentry_tr(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005664 if (!d_tracer)
5665 return NULL;
5666
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005667 tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005668
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005669 WARN_ONCE(!tr->percpu_dir,
5670 "Could not create debugfs directory 'per_cpu/%d'\n", cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005671
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005672 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005673}
5674
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005675static struct dentry *
5676trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
5677 void *data, long cpu, const struct file_operations *fops)
5678{
5679 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
5680
5681 if (ret) /* See tracing_get_cpu() */
5682 ret->d_inode->i_cdev = (void *)(cpu + 1);
5683 return ret;
5684}
5685
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005686static void
5687tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005688{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005689 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005690 struct dentry *d_cpu;
Steven Rostedtdd49a382010-10-20 21:51:26 -04005691 char cpu_dir[30]; /* 30 characters should be more than enough */
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005692
Namhyung Kim0a3d7ce2012-04-23 10:11:57 +09005693 if (!d_percpu)
5694 return;
5695
Steven Rostedtdd49a382010-10-20 21:51:26 -04005696 snprintf(cpu_dir, 30, "cpu%ld", cpu);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01005697 d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
5698 if (!d_cpu) {
5699 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
5700 return;
5701 }
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005702
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01005703 /* per cpu trace_pipe */
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005704 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
Oleg Nesterov15544202013-07-23 17:25:57 +02005705 tr, cpu, &tracing_pipe_fops);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005706
5707 /* per cpu trace */
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005708 trace_create_cpu_file("trace", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02005709 tr, cpu, &tracing_fops);
Steven Rostedt7f96f932009-03-13 00:37:42 -04005710
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005711 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005712 tr, cpu, &tracing_buffers_fops);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005713
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005714 trace_create_cpu_file("stats", 0444, d_cpu,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005715 tr, cpu, &tracing_stats_fops);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005716
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005717 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005718 tr, cpu, &tracing_entries_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005719
5720#ifdef CONFIG_TRACER_SNAPSHOT
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005721 trace_create_cpu_file("snapshot", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02005722 tr, cpu, &snapshot_fops);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005723
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005724 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005725 tr, cpu, &snapshot_raw_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005726#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005727}
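/*
 * Net effect of the function above: every CPU gets a per_cpu/cpu<N>/
 * directory containing trace, trace_pipe, trace_pipe_raw, stats and
 * buffer_size_kb (plus snapshot and snapshot_raw when
 * CONFIG_TRACER_SNAPSHOT is set), backed by the same fops as the
 * top-level files but scoped to one CPU via tracing_get_cpu().
 */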
5728
Steven Rostedt60a11772008-05-12 21:20:44 +02005729#ifdef CONFIG_FTRACE_SELFTEST
5730/* Let selftest have access to static functions in this file */
5731#include "trace_selftest.c"
5732#endif
5733
Steven Rostedt577b7852009-02-26 23:43:05 -05005734struct trace_option_dentry {
5735 struct tracer_opt *opt;
5736 struct tracer_flags *flags;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005737 struct trace_array *tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05005738 struct dentry *entry;
5739};
5740
5741static ssize_t
5742trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
5743 loff_t *ppos)
5744{
5745 struct trace_option_dentry *topt = filp->private_data;
5746 char *buf;
5747
5748 if (topt->flags->val & topt->opt->bit)
5749 buf = "1\n";
5750 else
5751 buf = "0\n";
5752
5753 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5754}
5755
5756static ssize_t
5757trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
5758 loff_t *ppos)
5759{
5760 struct trace_option_dentry *topt = filp->private_data;
5761 unsigned long val;
Steven Rostedt577b7852009-02-26 23:43:05 -05005762 int ret;
5763
Peter Huewe22fe9b52011-06-07 21:58:27 +02005764 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5765 if (ret)
Steven Rostedt577b7852009-02-26 23:43:05 -05005766 return ret;
5767
Li Zefan8d18eaa2009-12-08 11:17:06 +08005768 if (val != 0 && val != 1)
Steven Rostedt577b7852009-02-26 23:43:05 -05005769 return -EINVAL;
Li Zefan8d18eaa2009-12-08 11:17:06 +08005770
5771 if (!!(topt->flags->val & topt->opt->bit) != val) {
5772 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05005773 ret = __set_tracer_option(topt->tr, topt->flags,
Steven Rostedtc757bea2009-12-21 22:35:16 -05005774 topt->opt, !val);
Li Zefan8d18eaa2009-12-08 11:17:06 +08005775 mutex_unlock(&trace_types_lock);
5776 if (ret)
5777 return ret;
Steven Rostedt577b7852009-02-26 23:43:05 -05005778 }
5779
5780 *ppos += cnt;
5781
5782 return cnt;
5783}
5784
5785
5786static const struct file_operations trace_options_fops = {
5787 .open = tracing_open_generic,
5788 .read = trace_options_read,
5789 .write = trace_options_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005790 .llseek = generic_file_llseek,
Steven Rostedt577b7852009-02-26 23:43:05 -05005791};
5792
Steven Rostedta8259072009-02-26 22:19:12 -05005793static ssize_t
5794trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
5795 loff_t *ppos)
5796{
5797 long index = (long)filp->private_data;
5798 char *buf;
5799
5800 if (trace_flags & (1 << index))
5801 buf = "1\n";
5802 else
5803 buf = "0\n";
5804
5805 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5806}
5807
5808static ssize_t
5809trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
5810 loff_t *ppos)
5811{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005812 struct trace_array *tr = &global_trace;
Steven Rostedta8259072009-02-26 22:19:12 -05005813 long index = (long)filp->private_data;
Steven Rostedta8259072009-02-26 22:19:12 -05005814 unsigned long val;
5815 int ret;
5816
Peter Huewe22fe9b52011-06-07 21:58:27 +02005817 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5818 if (ret)
Steven Rostedta8259072009-02-26 22:19:12 -05005819 return ret;
5820
Zhaoleif2d84b62009-08-07 18:55:48 +08005821 if (val != 0 && val != 1)
Steven Rostedta8259072009-02-26 22:19:12 -05005822 return -EINVAL;
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04005823
5824 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005825 ret = set_tracer_flag(tr, 1 << index, val);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04005826 mutex_unlock(&trace_types_lock);
Steven Rostedta8259072009-02-26 22:19:12 -05005827
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005828 if (ret < 0)
5829 return ret;
5830
Steven Rostedta8259072009-02-26 22:19:12 -05005831 *ppos += cnt;
5832
5833 return cnt;
5834}
5835
Steven Rostedta8259072009-02-26 22:19:12 -05005836static const struct file_operations trace_options_core_fops = {
5837 .open = tracing_open_generic,
5838 .read = trace_options_core_read,
5839 .write = trace_options_core_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005840 .llseek = generic_file_llseek,
Steven Rostedta8259072009-02-26 22:19:12 -05005841};
5842
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005843struct dentry *trace_create_file(const char *name,
Al Virof4ae40a2011-07-24 04:33:43 -04005844 umode_t mode,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005845 struct dentry *parent,
5846 void *data,
5847 const struct file_operations *fops)
5848{
5849 struct dentry *ret;
5850
5851 ret = debugfs_create_file(name, mode, parent, data, fops);
5852 if (!ret)
5853 pr_warning("Could not create debugfs '%s' entry\n", name);
5854
5855 return ret;
5856}
5857
5858
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005859static struct dentry *trace_options_init_dentry(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05005860{
5861 struct dentry *d_tracer;
Steven Rostedta8259072009-02-26 22:19:12 -05005862
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005863 if (tr->options)
5864 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05005865
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005866 d_tracer = tracing_init_dentry_tr(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05005867 if (!d_tracer)
5868 return NULL;
5869
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005870 tr->options = debugfs_create_dir("options", d_tracer);
5871 if (!tr->options) {
Steven Rostedta8259072009-02-26 22:19:12 -05005872 pr_warning("Could not create debugfs directory 'options'\n");
5873 return NULL;
5874 }
5875
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005876 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05005877}
5878
Steven Rostedt577b7852009-02-26 23:43:05 -05005879static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005880create_trace_option_file(struct trace_array *tr,
5881 struct trace_option_dentry *topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05005882 struct tracer_flags *flags,
5883 struct tracer_opt *opt)
5884{
5885 struct dentry *t_options;
Steven Rostedt577b7852009-02-26 23:43:05 -05005886
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005887 t_options = trace_options_init_dentry(tr);
Steven Rostedt577b7852009-02-26 23:43:05 -05005888 if (!t_options)
5889 return;
5890
5891 topt->flags = flags;
5892 topt->opt = opt;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005893 topt->tr = tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05005894
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005895 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05005896 &trace_options_fops);
5897
Steven Rostedt577b7852009-02-26 23:43:05 -05005898}
5899
5900static struct trace_option_dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005901create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
Steven Rostedt577b7852009-02-26 23:43:05 -05005902{
5903 struct trace_option_dentry *topts;
5904 struct tracer_flags *flags;
5905 struct tracer_opt *opts;
5906 int cnt;
5907
5908 if (!tracer)
5909 return NULL;
5910
5911 flags = tracer->flags;
5912
5913 if (!flags || !flags->opts)
5914 return NULL;
5915
5916 opts = flags->opts;
5917
5918 for (cnt = 0; opts[cnt].name; cnt++)
5919 ;
5920
Steven Rostedt0cfe8242009-02-27 10:51:10 -05005921 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
Steven Rostedt577b7852009-02-26 23:43:05 -05005922 if (!topts)
5923 return NULL;
5924
5925 for (cnt = 0; opts[cnt].name; cnt++)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005926 create_trace_option_file(tr, &topts[cnt], flags,
Steven Rostedt577b7852009-02-26 23:43:05 -05005927 &opts[cnt]);
5928
5929 return topts;
5930}
5931
5932static void
5933destroy_trace_option_files(struct trace_option_dentry *topts)
5934{
5935 int cnt;
5936
5937 if (!topts)
5938 return;
5939
5940 for (cnt = 0; topts[cnt].opt; cnt++) {
5941 if (topts[cnt].entry)
5942 debugfs_remove(topts[cnt].entry);
5943 }
5944
5945 kfree(topts);
5946}
5947
Steven Rostedta8259072009-02-26 22:19:12 -05005948static struct dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005949create_trace_option_core_file(struct trace_array *tr,
5950 const char *option, long index)
Steven Rostedta8259072009-02-26 22:19:12 -05005951{
5952 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05005953
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005954 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05005955 if (!t_options)
5956 return NULL;
5957
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005958 return trace_create_file(option, 0644, t_options, (void *)index,
Steven Rostedta8259072009-02-26 22:19:12 -05005959 &trace_options_core_fops);
Steven Rostedta8259072009-02-26 22:19:12 -05005960}
5961
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005962static __init void create_trace_options_dir(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05005963{
5964 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05005965 int i;
5966
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005967 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05005968 if (!t_options)
5969 return;
5970
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005971 for (i = 0; trace_options[i]; i++)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005972 create_trace_option_core_file(tr, trace_options[i], i);
Steven Rostedta8259072009-02-26 22:19:12 -05005973}
5974
Steven Rostedt499e5472012-02-22 15:50:28 -05005975static ssize_t
5976rb_simple_read(struct file *filp, char __user *ubuf,
5977 size_t cnt, loff_t *ppos)
5978{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04005979 struct trace_array *tr = filp->private_data;
Steven Rostedt499e5472012-02-22 15:50:28 -05005980 char buf[64];
5981 int r;
5982
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04005983 r = tracer_tracing_is_on(tr);
Steven Rostedt499e5472012-02-22 15:50:28 -05005984 r = sprintf(buf, "%d\n", r);
5985
5986 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5987}
5988
5989static ssize_t
5990rb_simple_write(struct file *filp, const char __user *ubuf,
5991 size_t cnt, loff_t *ppos)
5992{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04005993 struct trace_array *tr = filp->private_data;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005994 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt499e5472012-02-22 15:50:28 -05005995 unsigned long val;
5996 int ret;
5997
5998 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5999 if (ret)
6000 return ret;
6001
6002 if (buffer) {
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05006003 mutex_lock(&trace_types_lock);
6004 if (val) {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04006005 tracer_tracing_on(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006006 if (tr->current_trace->start)
6007 tr->current_trace->start(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05006008 } else {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04006009 tracer_tracing_off(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006010 if (tr->current_trace->stop)
6011 tr->current_trace->stop(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05006012 }
6013 mutex_unlock(&trace_types_lock);
Steven Rostedt499e5472012-02-22 15:50:28 -05006014 }
6015
6016 (*ppos)++;
6017
6018 return cnt;
6019}
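/*
 * These two handlers back the "tracing_on" file. Illustrative usage:
 *
 *   # echo 0 > /sys/kernel/debug/tracing/tracing_on   - stop recording
 *   # echo 1 > /sys/kernel/debug/tracing/tracing_on   - resume
 *
 * Unlike toggling the ring buffer directly, writing here also invokes
 * the current tracer's ->start()/->stop() callbacks when present.
 */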
6020
6021static const struct file_operations rb_simple_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006022 .open = tracing_open_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05006023 .read = rb_simple_read,
6024 .write = rb_simple_write,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006025 .release = tracing_release_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05006026 .llseek = default_llseek,
6027};
6028
Steven Rostedt277ba042012-08-03 16:10:49 -04006029struct dentry *trace_instance_dir;
6030
6031static void
6032init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
6033
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006034static int
6035allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
Steven Rostedt277ba042012-08-03 16:10:49 -04006036{
6037 enum ring_buffer_flags rb_flags;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006038
6039 rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
6040
Steven Rostedt (Red Hat)dced3412014-01-14 10:19:46 -05006041 buf->tr = tr;
6042
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006043 buf->buffer = ring_buffer_alloc(size, rb_flags);
6044 if (!buf->buffer)
6045 return -ENOMEM;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006046
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006047 buf->data = alloc_percpu(struct trace_array_cpu);
6048 if (!buf->data) {
6049 ring_buffer_free(buf->buffer);
6050 return -ENOMEM;
6051 }
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006052
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006053 /* Allocate the first page for all buffers */
6054 set_buffer_entries(&tr->trace_buffer,
6055 ring_buffer_size(tr->trace_buffer.buffer, 0));
6056
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006057 return 0;
6058}
6059
6060static int allocate_trace_buffers(struct trace_array *tr, int size)
6061{
6062 int ret;
6063
6064 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
6065 if (ret)
6066 return ret;
6067
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006068#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006069 ret = allocate_trace_buffer(tr, &tr->max_buffer,
6070 allocate_snapshot ? size : 1);
6071 if (WARN_ON(ret)) {
6072 ring_buffer_free(tr->trace_buffer.buffer);
6073 free_percpu(tr->trace_buffer.data);
6074 return -ENOMEM;
6075 }
6076 tr->allocated_snapshot = allocate_snapshot;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006077
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006078 /*
6079 * Only the top level trace array gets its snapshot allocated
6080 * from the kernel command line.
6081 */
6082 allocate_snapshot = false;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006083#endif
6084 return 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006085}
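/*
 * Note: with CONFIG_TRACER_MAX_TRACE set but no boot-time snapshot
 * request, the max buffer is created with a token size of 1 so that it
 * exists and can be resized on demand, without paying the full memory
 * cost up front.
 */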

static int new_instance_create(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = debugfs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		debugfs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	init_tracer_debugfs(tr, tr->dir);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	if (tr->trace_buffer.buffer)
		ring_buffer_free(tr->trace_buffer.buffer);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static int instance_delete(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	ret = -EBUSY;
	if (tr->ref)
		goto out_unlock;

	list_del(&tr->list);

	tracing_set_nop(tr);
	event_trace_del_tracer(tr);
	ftrace_destroy_function_files(tr);
	debugfs_remove_recursive(tr->dir);
	free_percpu(tr->trace_buffer.data);
	ring_buffer_free(tr->trace_buffer.buffer);

	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}
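
/*
 * Note the -EBUSY above: an instance cannot be deleted while tr->ref
 * is nonzero, which typically means some file inside the instance
 * directory is still held open.
 */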

static int instance_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/*
	 * The inode mutex is locked, but debugfs_create_dir() will also
	 * take the mutex. As the instances directory can not be destroyed
	 * or changed in any other way, it is safe to unlock it, and
	 * let the dentry try. If two users try to make the same dir at
	 * the same time, then the new_instance_create() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = new_instance_create(dentry->d_iname);

	mutex_lock(&inode->i_mutex);

	return ret;
}

static int instance_rmdir(struct inode *inode, struct dentry *dentry)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/* The caller did a dget() on dentry */
	mutex_unlock(&dentry->d_inode->i_mutex);

	/*
	 * The inode mutex is locked, but debugfs_remove_recursive() will
	 * also take the mutex. As the instances directory can not be
	 * destroyed or changed in any other way, it is safe to unlock it,
	 * and let the dentry try. If two users try to remove the same dir
	 * at the same time, then instance_delete() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = instance_delete(dentry->d_iname);

	mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
	mutex_lock(&dentry->d_inode->i_mutex);

	return ret;
}

static const struct inode_operations instance_dir_inode_operations = {
	.lookup		= simple_lookup,
	.mkdir		= instance_mkdir,
	.rmdir		= instance_rmdir,
};
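
/*
 * Usage sketch from user space, assuming debugfs is mounted at
 * /sys/kernel/debug:
 *
 *   mkdir /sys/kernel/debug/tracing/instances/foo    <- new_instance_create()
 *   echo 1 > /sys/kernel/debug/tracing/instances/foo/tracing_on
 *   rmdir /sys/kernel/debug/tracing/instances/foo    <- instance_delete()
 *
 * The hijacked mkdir/rmdir inode operations above are what route these
 * ordinary directory operations into the tracing code.
 */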

static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = debugfs_create_dir("instances", d_tracer);
	if (WARN_ON(!trace_instance_dir))
		return;

	/* Hijack the dir inode operations, to allow mkdir */
	trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
}

static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			tr, &tracing_mark_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			&trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			tr, &rb_simple_fops);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			&tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_debugfs_percpu(tr, cpu);
}
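
/*
 * After init_tracer_debugfs() runs, the instance (or top level)
 * directory is expected to hold, among others: trace, trace_pipe,
 * current_tracer, available_tracers, buffer_size_kb, trace_marker,
 * trace_clock, tracing_on, and the per_cpu/cpu* subdirectories
 * created by the loop above.
 */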

static __init int tracer_init_debugfs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	init_tracer_debugfs(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			&tracing_thresh, &tracing_max_lat_fops);

	trace_create_file("README", 0444, d_tracer,
			NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			NULL, &tracing_saved_cmdlines_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	create_trace_options_dir(&global_trace);

	return 0;
}

static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call	= trace_panic_handler,
	.next		= NULL,
	.priority	= 150	/* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};
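
/*
 * Both notifiers honor ftrace_dump_on_oops, which can be set with the
 * "ftrace_dump_on_oops" kernel command line option or, assuming the
 * sysctl is exposed on the running kernel, at run time:
 *
 *   sysctl kernel.ftrace_dump_on_oops=1
 */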

/*
 * printk is limited to a maximum of 1024 characters per call; we
 * really don't need it that big, as nothing should be printing
 * 1000 characters per trace line anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define KERN_TRACE in one place, so it is easy to change what log
 * level the ftrace dump should be printed at.
 */
#define KERN_TRACE		KERN_EMERG
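/* KERN_EMERG keeps the dump visible even under a restrictive console loglevel. */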

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->len >= TRACE_MAX_PRINT)
		s->len = TRACE_MAX_PRINT;

	/* should be zero terminated, but we are paranoid */
	s->buffer[s->len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}

void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
	}

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUs to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all that we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
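
/*
 * A minimal usage sketch from other kernel code; my_driver_die() is
 * hypothetical, for illustration only:
 *
 *	static void my_driver_die(void)
 *	{
 *		ftrace_dump(DUMP_ORIG);
 *	}
 *
 * DUMP_ALL dumps every CPU's buffer, DUMP_ORIG only the buffer of the
 * CPU that made the call; sysrq-z ends up here with DUMP_ALL.
 */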

__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* Used for event triggers */
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_free_cpumask;

	/* TODO: make the number of buffers hot pluggable with CPUs */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_temp_buffer;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	trace_init_cmdlines();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warning("Trace clock %s not defined, going back to default\n",
				   trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	while (trace_boot_options) {
		char *option;

		option = strsep(&trace_boot_options, ",");
		trace_set_options(&global_trace, option);
	}

	register_snapshot_cmd();

	return 0;

out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_free_cpumask:
	free_percpu(global_trace.trace_buffer.data);
#ifdef CONFIG_TRACER_MAX_TRACE
	free_percpu(global_trace.max_buffer.data);
#endif
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The default bootup tracer name points into an init section
	 * that will be freed once boot is done. This function runs as
	 * a late initcall: if the requested boot tracer was never
	 * found, clear the pointer out, to prevent a later
	 * registration from accessing the buffer that is about to be
	 * freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

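/*
 * Initcall ordering does the sequencing here: early_initcall()
 * allocates the buffers first, fs_initcall() then creates the debugfs
 * files that expose them, and late_initcall() finally clears the boot
 * tracer once every built-in tracer has had a chance to register.
 */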
early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);