/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will lurk into the ring-buffer to count the
 * entries inserted during the selftest, although some concurrent
 * insertions into the ring-buffer, such as trace_printk, could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);
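/*
 * Example (illustrative, not part of this file): a tracer can be
 * selected on the kernel command line so it starts early in boot:
 *
 *	ftrace=function
 *
 * The name must match a tracer later registered via register_tracer(),
 * which is where default_bootup_tracer is acted upon.
 */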

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
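/*
 * Usage sketch for the parser above; both forms are accepted on the
 * kernel command line:
 *
 *	ftrace_dump_on_oops		selects DUMP_ALL (all CPU buffers)
 *	ftrace_dump_on_oops=orig_cpu	selects DUMP_ORIG (oopsing CPU only)
 */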

static int __init stop_trace_on_warning(char *str)
{
	__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning=", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);
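/*
 * Example (illustrative): the trace clock can likewise be chosen at
 * boot, e.g.
 *
 *	trace_clock=global
 *
 * The saved name is matched later against the trace_clocks[] table
 * defined further down in this file.
 */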


unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array global_trace;

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}
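/*
 * Sketch of the expected pairing for callers outside this file (the
 * surrounding names are only an illustration):
 *
 *	if (trace_array_get(tr) < 0)
 *		return -ENODEV;		(tr is gone from the list)
 *	...use tr...
 *	trace_array_put(tr);
 *
 * The reference only pins the trace_array against removal; it does not
 * provide any locking of its contents.
 */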

int filter_check_discard(struct ftrace_event_file *file, void *rec,
			 struct ring_buffer *buffer,
			 struct ring_buffer_event *event)
{
	if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(file->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(filter_check_discard);

int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(call_filter_check_discard);

static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer *trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of events (as returned by ring_buffer_peek() etc.)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in ring buffer, and this page will be rewritten
 *      by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different cpu
 * ring buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
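/*
 * Reader-side usage sketch for the primitives above (illustrative):
 *
 *	trace_access_lock(cpu);		(or RING_BUFFER_ALL_CPUS)
 *	...read or consume events of that cpu buffer...
 *	trace_access_unlock(cpu);
 *
 * trace_access_lock_init() must have been called before the per-cpu
 * mutexes are used.
 */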

/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;

static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, preempt_count());
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, preempt_count());
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
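/*
 * Illustrative usage pattern (not from this file): allocate the
 * snapshot buffer up front where sleeping is allowed, then trigger the
 * snapshot from a context that cannot sleep:
 *
 *	tracing_snapshot_alloc();	(init/setup time, may sleep)
 *	...
 *	if (condition_worth_keeping)
 *		tracing_snapshot();	(atomic context is fine, NMI is not)
 */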

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);
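/*
 * memparse() understands the usual size suffixes, so for example
 *
 *	trace_buf_size=1M
 *
 * requests a one megabyte buffer (rounded to page size, as noted above
 * TRACE_BUF_SIZE_DEFAULT).
 */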

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);
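/*
 * Note the unit conversion above: the boot parameter is given in
 * microseconds but stored in nanoseconds, e.g.
 *
 *	tracing_thresh=100
 *
 * sets the threshold to 100 usecs (100000 ns).
 */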

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	"record-cmd",
	"overwrite",
	"disable_on_free",
	"irq-info",
	"markers",
	"function-trace",
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,	"local",	1 },
	{ trace_clock_global,	"global",	1 },
	{ trace_clock_counter,	"counter",	0 },
	{ trace_clock_jiffies,	"uptime",	1 },
	{ trace_clock,		"perf",		1 },
	ARCH_TRACE_CLOCKS
};
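/*
 * The names in trace_clocks[] are what user space selects at run time,
 * e.g. (illustrative):
 *
 *	echo global > /sys/kernel/debug/tracing/trace_clock
 *
 * and what the trace_clock= boot parameter above is matched against.
 */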
816
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +0200817/*
818 * trace_parser_get_init - gets the buffer for trace parser
819 */
820int trace_parser_get_init(struct trace_parser *parser, int size)
821{
822 memset(parser, 0, sizeof(*parser));
823
824 parser->buffer = kmalloc(size, GFP_KERNEL);
825 if (!parser->buffer)
826 return 1;
827
828 parser->size = size;
829 return 0;
830}
831
832/*
833 * trace_parser_put - frees the buffer for trace parser
834 */
835void trace_parser_put(struct trace_parser *parser)
836{
837 kfree(parser->buffer);
838}
839
840/*
841 * trace_get_user - reads the user input string separated by space
842 * (matched by isspace(ch))
843 *
844 * For each string found the 'struct trace_parser' is updated,
845 * and the function returns.
846 *
847 * Returns number of bytes read.
848 *
849 * See kernel/trace/trace.h for 'struct trace_parser' details.
850 */
851int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
852 size_t cnt, loff_t *ppos)
853{
854 char ch;
855 size_t read = 0;
856 ssize_t ret;
857
858 if (!*ppos)
859 trace_parser_clear(parser);
860
861 ret = get_user(ch, ubuf++);
862 if (ret)
863 goto out;
864
865 read++;
866 cnt--;
867
868 /*
869 * The parser is not finished with the last write,
870 * continue reading the user input without skipping spaces.
871 */
872 if (!parser->cont) {
873 /* skip white space */
874 while (cnt && isspace(ch)) {
875 ret = get_user(ch, ubuf++);
876 if (ret)
877 goto out;
878 read++;
879 cnt--;
880 }
881
882 /* only spaces were written */
883 if (isspace(ch)) {
884 *ppos += read;
885 ret = read;
886 goto out;
887 }
888
889 parser->idx = 0;
890 }
891
892 /* read the non-space input */
893 while (cnt && !isspace(ch)) {
Li Zefan3c235a32009-09-22 13:51:54 +0800894 if (parser->idx < parser->size - 1)
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +0200895 parser->buffer[parser->idx++] = ch;
896 else {
897 ret = -EINVAL;
898 goto out;
899 }
900 ret = get_user(ch, ubuf++);
901 if (ret)
902 goto out;
903 read++;
904 cnt--;
905 }
906
907 /* We either got finished input or we have to wait for another call. */
908 if (isspace(ch)) {
909 parser->buffer[parser->idx] = 0;
910 parser->cont = false;
Steven Rostedt057db842013-10-09 22:23:23 -0400911 } else if (parser->idx < parser->size - 1) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +0200912 parser->cont = true;
913 parser->buffer[parser->idx++] = ch;
Steven Rostedt057db842013-10-09 22:23:23 -0400914 } else {
915 ret = -EINVAL;
916 goto out;
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +0200917 }
918
919 *ppos += read;
920 ret = read;
921
922out:
923 return ret;
924}
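/*
 * Behavior sketch for trace_get_user(): a single write of "func1 func2"
 * comes back one token per call ("func1", then "func2"), and
 * parser->cont marks a token that was cut short by a partial read so
 * the next call continues it instead of starting a new one.
 */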

ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
	int len;
	int ret;

	if (!cnt)
		return 0;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
	if (ret == cnt)
		return -EFAULT;

	cnt -= ret;

	s->readpos += cnt;
	return cnt;
}

static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->readpos, cnt);

	s->readpos += cnt;
	return cnt;
}

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static void wait_on_pipe(struct trace_iterator *iter)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return;

	ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break them. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}

void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}

#define SAVED_CMDLINES 128
#define NO_CMDLINE_MAP UINT_MAX
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;

/* temporarily disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;

static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}

int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

	ftrace_start();
 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}
1354
1355static void tracing_start_tr(struct trace_array *tr)
1356{
1357 struct ring_buffer *buffer;
1358 unsigned long flags;
1359
1360 if (tracing_disabled)
1361 return;
1362
1363 /* If global, we need to also start the max tracer */
1364 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1365 return tracing_start();
1366
1367 raw_spin_lock_irqsave(&tr->start_lock, flags);
1368
1369 if (--tr->stop_count) {
1370 if (tr->stop_count < 0) {
1371 /* Someone screwed up their debugging */
1372 WARN_ON_ONCE(1);
1373 tr->stop_count = 0;
1374 }
1375 goto out;
1376 }
1377
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001378 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001379 if (buffer)
1380 ring_buffer_record_enable(buffer);
1381
1382 out:
1383 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001384}
1385
1386/**
1387 * tracing_stop - quick stop of the tracer
1388 *
1389 * Light weight way to stop tracing. Use in conjunction with
1390 * tracing_start.
1391 */
1392void tracing_stop(void)
1393{
1394 struct ring_buffer *buffer;
1395 unsigned long flags;
1396
1397 ftrace_stop();
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001398 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1399 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05001400 goto out;
1401
Steven Rostedta2f80712010-03-12 19:56:00 -05001402 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001403 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001404
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001405 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001406 if (buffer)
1407 ring_buffer_record_disable(buffer);
1408
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001409#ifdef CONFIG_TRACER_MAX_TRACE
1410 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001411 if (buffer)
1412 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001413#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001414
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001415 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001416
Steven Rostedt0f048702008-11-05 16:05:44 -05001417 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001418 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1419}
1420
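/*
 * Illustrative sketch (not part of this file): tracing_stop() and
 * tracing_start() nest via stop_count, so independent paired callers
 * compose; only the outermost tracing_start() re-enables recording.
 *
 *	tracing_stop();		// stop_count 0 -> 1, recording off
 *	tracing_stop();		// stop_count 1 -> 2
 *	tracing_start();	// stop_count 2 -> 1, still off
 *	tracing_start();	// stop_count 1 -> 0, recording back on
 */
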
1421static void tracing_stop_tr(struct trace_array *tr)
1422{
1423 struct ring_buffer *buffer;
1424 unsigned long flags;
1425
1426 /* If global, we need to also stop the max tracer */
1427 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1428 return tracing_stop();
1429
1430 raw_spin_lock_irqsave(&tr->start_lock, flags);
1431 if (tr->stop_count++)
1432 goto out;
1433
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001434 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001435 if (buffer)
1436 ring_buffer_record_disable(buffer);
1437
1438 out:
1439 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001440}
1441
Ingo Molnare309b412008-05-12 21:20:51 +02001442void trace_stop_cmdline_recording(void);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001443
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001444static int trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001445{
Carsten Emdea635cf02009-03-18 09:00:41 +01001446 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001447
1448 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001449 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001450
1451 /*
1452 * It's not the end of the world if we don't get
1453 * the lock, but we also don't want to spin
1454 * nor do we want to disable interrupts,
1455 * so if we miss here, then better luck next time.
1456 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001457 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001458 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001459
1460 idx = map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001461 if (idx == NO_CMDLINE_MAP) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001462 idx = (cmdline_idx + 1) % SAVED_CMDLINES;
1463
Carsten Emdea635cf02009-03-18 09:00:41 +01001464 /*
1465 * Check whether the cmdline buffer at idx has a pid
1466 * mapped. We are going to overwrite that entry so we
1467 * need to clear the map_pid_to_cmdline. Otherwise we
1468 * would read the new comm for the old pid.
1469 */
1470 pid = map_cmdline_to_pid[idx];
1471 if (pid != NO_CMDLINE_MAP)
1472 map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001473
Carsten Emdea635cf02009-03-18 09:00:41 +01001474 map_cmdline_to_pid[idx] = tsk->pid;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001475 map_pid_to_cmdline[tsk->pid] = idx;
1476
1477 cmdline_idx = idx;
1478 }
1479
1480 memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
1481
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001482 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001483
1484 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001485}
1486
Steven Rostedt4ca53082009-03-16 19:20:15 -04001487void trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001488{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001489 unsigned map;
1490
Steven Rostedt4ca53082009-03-16 19:20:15 -04001491 if (!pid) {
1492 strcpy(comm, "<idle>");
1493 return;
1494 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001495
Steven Rostedt74bf4072010-01-25 15:11:53 -05001496 if (WARN_ON_ONCE(pid < 0)) {
1497 strcpy(comm, "<XXX>");
1498 return;
1499 }
1500
Steven Rostedt4ca53082009-03-16 19:20:15 -04001501 if (pid > PID_MAX_DEFAULT) {
1502 strcpy(comm, "<...>");
1503 return;
1504 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001505
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001506 preempt_disable();
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001507 arch_spin_lock(&trace_cmdline_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001508 map = map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001509 if (map != NO_CMDLINE_MAP)
1510 strcpy(comm, saved_cmdlines[map]);
1511 else
1512 strcpy(comm, "<...>");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001513
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001514 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001515 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001516}
1517
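/*
 * Illustrative usage (not part of this file): readers resolve the pid
 * stored in an event back to a comm with trace_find_cmdline(). The
 * buffer must hold TASK_COMM_LEN bytes; "<...>" means the entry was
 * evicted from the small cache or was never recorded.
 *
 *	char comm[TASK_COMM_LEN];
 *	trace_find_cmdline(ent->pid, comm);
 */
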
Ingo Molnare309b412008-05-12 21:20:51 +02001518void tracing_record_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001519{
Steven Rostedt0fb96562012-05-11 14:25:30 -04001520 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001521 return;
1522
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001523 if (!__this_cpu_read(trace_cmdline_save))
1524 return;
1525
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001526 if (trace_save_cmdline(tsk))
1527 __this_cpu_write(trace_cmdline_save, false);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001528}
1529
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03001530void
Steven Rostedt38697052008-10-01 13:14:09 -04001531tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1532 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001533{
1534 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001535
Steven Rostedt777e2082008-09-29 23:02:42 -04001536 entry->preempt_count = pc & 0xff;
1537 entry->pid = (tsk) ? tsk->pid : 0;
1538 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04001539#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04001540 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04001541#else
1542 TRACE_FLAG_IRQS_NOSUPPORT |
1543#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001544 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1545 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02001546 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1547 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001548}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02001549EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001550
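/*
 * Illustrative sketch (not part of this file): the flags built above
 * are what the latency output prints as the irqs-off/need-resched/
 * hardirq-softirq/preempt-depth columns.
 *
 *	struct trace_entry ent;
 *	tracing_generic_entry_update(&ent, flags, preempt_count());
 *	// ent.flags holds the IRQS_OFF/HARDIRQ/SOFTIRQ/RESCHED bits,
 *	// ent.preempt_count the low byte of the preempt count.
 */
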
Steven Rostedte77405a2009-09-02 14:17:06 -04001551struct ring_buffer_event *
1552trace_buffer_lock_reserve(struct ring_buffer *buffer,
1553 int type,
1554 unsigned long len,
1555 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001556{
1557 struct ring_buffer_event *event;
1558
Steven Rostedte77405a2009-09-02 14:17:06 -04001559 event = ring_buffer_lock_reserve(buffer, len);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001560 if (event != NULL) {
1561 struct trace_entry *ent = ring_buffer_event_data(event);
1562
1563 tracing_generic_entry_update(ent, flags, pc);
1564 ent->type = type;
1565 }
1566
1567 return event;
1568}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001569
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001570void
1571__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1572{
1573 __this_cpu_write(trace_cmdline_save, true);
1574 ring_buffer_unlock_commit(buffer, event);
1575}
1576
Steven Rostedte77405a2009-09-02 14:17:06 -04001577static inline void
1578__trace_buffer_unlock_commit(struct ring_buffer *buffer,
1579 struct ring_buffer_event *event,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001580 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001581{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001582 __buffer_unlock_commit(buffer, event);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001583
Steven Rostedte77405a2009-09-02 14:17:06 -04001584 ftrace_trace_stack(buffer, flags, 6, pc);
1585 ftrace_trace_userstack(buffer, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001586}
1587
Steven Rostedte77405a2009-09-02 14:17:06 -04001588void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1589 struct ring_buffer_event *event,
1590 unsigned long flags, int pc)
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001591{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001592 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001593}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001594EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001595
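/*
 * Illustrative sketch (not part of this file): writers pair the
 * reserve and commit helpers above. Once a reserve succeeds, the
 * event must either be committed or discarded.
 *
 *	event = trace_buffer_lock_reserve(buffer, TRACE_FN,
 *					  sizeof(*entry), flags, pc);
 *	if (!event)
 *		return;		// buffer full or recording disabled
 *	entry = ring_buffer_event_data(event);
 *	// ... fill in *entry ...
 *	trace_buffer_unlock_commit(buffer, event, flags, pc);
 */
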
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001596static struct ring_buffer *temp_buffer;
1597
Steven Rostedtef5580d2009-02-27 19:38:04 -05001598struct ring_buffer_event *
Steven Rostedtccb469a2012-08-02 10:32:10 -04001599trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1600 struct ftrace_event_file *ftrace_file,
1601 int type, unsigned long len,
1602 unsigned long flags, int pc)
1603{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001604 struct ring_buffer_event *entry;
1605
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001606 *current_rb = ftrace_file->tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001607 entry = trace_buffer_lock_reserve(*current_rb,
Steven Rostedtccb469a2012-08-02 10:32:10 -04001608 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001609 /*
1610 * If tracing is off, but we have triggers enabled
1611 * we still need to look at the event data. Use the temp_buffer
 1612	 * to store the trace event for the trigger to use. It's recursion
 1613	 * safe and will not be recorded anywhere.
1614 */
1615 if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
1616 *current_rb = temp_buffer;
1617 entry = trace_buffer_lock_reserve(*current_rb,
1618 type, len, flags, pc);
1619 }
1620 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04001621}
1622EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1623
1624struct ring_buffer_event *
Steven Rostedte77405a2009-09-02 14:17:06 -04001625trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1626 int type, unsigned long len,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001627 unsigned long flags, int pc)
1628{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001629 *current_rb = global_trace.trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04001630 return trace_buffer_lock_reserve(*current_rb,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001631 type, len, flags, pc);
1632}
Steven Rostedt94487d62009-05-05 19:22:53 -04001633EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001634
Steven Rostedte77405a2009-09-02 14:17:06 -04001635void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1636 struct ring_buffer_event *event,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001637 unsigned long flags, int pc)
1638{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001639 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001640}
Steven Rostedt94487d62009-05-05 19:22:53 -04001641EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001642
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001643void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1644 struct ring_buffer_event *event,
1645 unsigned long flags, int pc,
1646 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001647{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001648 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001649
1650 ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1651 ftrace_trace_userstack(buffer, flags, pc);
1652}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001653EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001654
Steven Rostedte77405a2009-09-02 14:17:06 -04001655void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1656 struct ring_buffer_event *event)
Steven Rostedt77d9f462009-04-02 01:16:59 -04001657{
Steven Rostedte77405a2009-09-02 14:17:06 -04001658 ring_buffer_discard_commit(buffer, event);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001659}
Steven Rostedt12acd472009-04-17 16:01:56 -04001660EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001661
Ingo Molnare309b412008-05-12 21:20:51 +02001662void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001663trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04001664 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1665 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001666{
Tom Zanussie1112b42009-03-31 00:48:49 -05001667 struct ftrace_event_call *call = &event_function;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001668 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001669 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001670 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001671
Steven Rostedtd7690412008-10-01 00:29:53 -04001672 /* If we are reading the ring buffer, don't trace */
Rusty Russelldd17c8f2009-10-29 22:34:15 +09001673 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
Steven Rostedtd7690412008-10-01 00:29:53 -04001674 return;
1675
Steven Rostedte77405a2009-09-02 14:17:06 -04001676 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001677 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001678 if (!event)
1679 return;
1680 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04001681 entry->ip = ip;
1682 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05001683
Tom Zanussif306cc82013-10-24 08:34:17 -05001684 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001685 __buffer_unlock_commit(buffer, event);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001686}
1687
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001688#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001689
1690#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1691struct ftrace_stack {
1692 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1693};
1694
1695static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1696static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1697
Steven Rostedte77405a2009-09-02 14:17:06 -04001698static void __ftrace_trace_stack(struct ring_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05001699 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001700 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02001701{
Tom Zanussie1112b42009-03-31 00:48:49 -05001702 struct ftrace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001703 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001704 struct stack_entry *entry;
Ingo Molnar86387f72008-05-12 21:20:51 +02001705 struct stack_trace trace;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001706 int use_stack;
1707 int size = FTRACE_STACK_ENTRIES;
Ingo Molnar86387f72008-05-12 21:20:51 +02001708
1709 trace.nr_entries = 0;
Ingo Molnar86387f72008-05-12 21:20:51 +02001710 trace.skip = skip;
Ingo Molnar86387f72008-05-12 21:20:51 +02001711
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001712 /*
1713 * Since events can happen in NMIs there's no safe way to
1714 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1715 * or NMI comes in, it will just have to use the default
1716 * FTRACE_STACK_SIZE.
1717 */
1718 preempt_disable_notrace();
1719
Shan Wei82146522012-11-19 13:21:01 +08001720 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001721 /*
1722 * We don't need any atomic variables, just a barrier.
1723 * If an interrupt comes in, we don't care, because it would
1724 * have exited and put the counter back to what we want.
1725 * We just need a barrier to keep gcc from moving things
1726 * around.
1727 */
1728 barrier();
1729 if (use_stack == 1) {
Christoph Lameterbdffd892014-04-29 14:17:40 -05001730 trace.entries = this_cpu_ptr(ftrace_stack.calls);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001731 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1732
1733 if (regs)
1734 save_stack_trace_regs(regs, &trace);
1735 else
1736 save_stack_trace(&trace);
1737
1738 if (trace.nr_entries > size)
1739 size = trace.nr_entries;
1740 } else
1741 /* From now on, use_stack is a boolean */
1742 use_stack = 0;
1743
1744 size *= sizeof(unsigned long);
1745
1746 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1747 sizeof(*entry) + size, flags, pc);
1748 if (!event)
1749 goto out;
1750 entry = ring_buffer_event_data(event);
1751
1752 memset(&entry->caller, 0, size);
1753
1754 if (use_stack)
1755 memcpy(&entry->caller, trace.entries,
1756 trace.nr_entries * sizeof(unsigned long));
1757 else {
1758 trace.max_entries = FTRACE_STACK_ENTRIES;
1759 trace.entries = entry->caller;
1760 if (regs)
1761 save_stack_trace_regs(regs, &trace);
1762 else
1763 save_stack_trace(&trace);
1764 }
1765
1766 entry->size = trace.nr_entries;
1767
Tom Zanussif306cc82013-10-24 08:34:17 -05001768 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001769 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001770
1771 out:
1772 /* Again, don't let gcc optimize things here */
1773 barrier();
Shan Wei82146522012-11-19 13:21:01 +08001774 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001775 preempt_enable_notrace();
1776
Ingo Molnarf0a920d2008-05-12 21:20:47 +02001777}
1778
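/*
 * Illustrative sketch (not part of this file) of the reservation
 * trick above: ftrace_stack_reserve acts as a per-cpu recursion
 * depth. Only the first user on a cpu (depth 1) may fill the large
 * per-cpu ftrace_stack; a nested interrupt or NMI sees depth > 1 and
 * writes at most FTRACE_STACK_ENTRIES entries straight into the ring
 * buffer event instead.
 *
 *	depth = __this_cpu_inc_return(ftrace_stack_reserve);
 *	use_big_stack = (depth == 1);
 */
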
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001779void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1780 int skip, int pc, struct pt_regs *regs)
1781{
1782 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1783 return;
1784
1785 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1786}
1787
Steven Rostedte77405a2009-09-02 14:17:06 -04001788void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1789 int skip, int pc)
Steven Rostedt53614992009-01-15 19:12:40 -05001790{
1791 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1792 return;
1793
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001794 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
Steven Rostedt53614992009-01-15 19:12:40 -05001795}
1796
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001797void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1798 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04001799{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001800 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
Steven Rostedt38697052008-10-01 13:14:09 -04001801}
1802
Steven Rostedt03889382009-12-11 09:48:22 -05001803/**
1804 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001805 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05001806 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001807void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05001808{
1809 unsigned long flags;
1810
1811 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05001812 return;
Steven Rostedt03889382009-12-11 09:48:22 -05001813
1814 local_save_flags(flags);
1815
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001816 /*
 1817	 * Skip 3 more; that seems to get us to the caller
 1818	 * of this function.
1819 */
1820 skip += 3;
1821 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1822 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05001823}
1824
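/*
 * Illustrative usage (not part of this file): code debugging an
 * unexpected path can drop a backtrace into the trace buffer rather
 * than into dmesg; skip == 0 starts the trace at the caller.
 *
 *	trace_dump_stack(0);
 */
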
Steven Rostedt91e86e52010-11-10 12:56:12 +01001825static DEFINE_PER_CPU(int, user_stack_count);
1826
Steven Rostedte77405a2009-09-02 14:17:06 -04001827void
1828ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02001829{
Tom Zanussie1112b42009-03-31 00:48:49 -05001830 struct ftrace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02001831 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02001832 struct userstack_entry *entry;
1833 struct stack_trace trace;
Török Edwin02b67512008-11-22 13:28:47 +02001834
1835 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1836 return;
1837
Steven Rostedtb6345872010-03-12 20:03:30 -05001838 /*
1839 * NMIs can not handle page faults, even with fix ups.
1840 * The save user stack can (and often does) fault.
1841 */
1842 if (unlikely(in_nmi()))
1843 return;
1844
Steven Rostedt91e86e52010-11-10 12:56:12 +01001845 /*
1846 * prevent recursion, since the user stack tracing may
1847 * trigger other kernel events.
1848 */
1849 preempt_disable();
1850 if (__this_cpu_read(user_stack_count))
1851 goto out;
1852
1853 __this_cpu_inc(user_stack_count);
1854
Steven Rostedte77405a2009-09-02 14:17:06 -04001855 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001856 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02001857 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08001858 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02001859 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02001860
Steven Rostedt48659d32009-09-11 11:36:23 -04001861 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02001862 memset(&entry->caller, 0, sizeof(entry->caller));
1863
1864 trace.nr_entries = 0;
1865 trace.max_entries = FTRACE_STACK_ENTRIES;
1866 trace.skip = 0;
1867 trace.entries = entry->caller;
1868
1869 save_stack_trace_user(&trace);
Tom Zanussif306cc82013-10-24 08:34:17 -05001870 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001871 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001872
Li Zefan1dbd1952010-12-09 15:47:56 +08001873 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01001874 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001875 out:
1876 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02001877}
1878
Hannes Eder4fd27352009-02-10 19:44:12 +01001879#ifdef UNUSED
1880static void __trace_userstack(struct trace_array *tr, unsigned long flags)
Török Edwin02b67512008-11-22 13:28:47 +02001881{
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001882 ftrace_trace_userstack(tr, flags, preempt_count());
Török Edwin02b67512008-11-22 13:28:47 +02001883}
Hannes Eder4fd27352009-02-10 19:44:12 +01001884#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02001885
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001886#endif /* CONFIG_STACKTRACE */
1887
Steven Rostedt07d777f2011-09-22 14:01:55 -04001888/* created for use with alloc_percpu */
1889struct trace_buffer_struct {
1890 char buffer[TRACE_BUF_SIZE];
1891};
1892
1893static struct trace_buffer_struct *trace_percpu_buffer;
1894static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1895static struct trace_buffer_struct *trace_percpu_irq_buffer;
1896static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1897
1898/*
1899 * The buffer used is dependent on the context. There is a per cpu
 1900	 * buffer for normal context, softirq context, hard irq context and
 1901	 * for NMI context. This allows for lockless recording.
1902 *
1903 * Note, if the buffers failed to be allocated, then this returns NULL
1904 */
1905static char *get_trace_buf(void)
1906{
1907 struct trace_buffer_struct *percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04001908
1909 /*
1910 * If we have allocated per cpu buffers, then we do not
1911 * need to do any locking.
1912 */
1913 if (in_nmi())
1914 percpu_buffer = trace_percpu_nmi_buffer;
1915 else if (in_irq())
1916 percpu_buffer = trace_percpu_irq_buffer;
1917 else if (in_softirq())
1918 percpu_buffer = trace_percpu_sirq_buffer;
1919 else
1920 percpu_buffer = trace_percpu_buffer;
1921
1922 if (!percpu_buffer)
1923 return NULL;
1924
Shan Weid8a03492012-11-13 09:53:04 +08001925 return this_cpu_ptr(&percpu_buffer->buffer[0]);
Steven Rostedt07d777f2011-09-22 14:01:55 -04001926}
1927
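/*
 * Illustrative sketch (not part of this file): because each context
 * level has its own per-cpu buffer, an interrupt that fires in the
 * middle of a task-level trace_printk() formats into a different
 * buffer than the one it interrupted, so this path needs no locks.
 *
 *	char *buf = get_trace_buf();
 *	if (buf)
 *		len = vsnprintf(buf, TRACE_BUF_SIZE, fmt, args);
 */
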
1928static int alloc_percpu_trace_buffer(void)
1929{
1930 struct trace_buffer_struct *buffers;
1931 struct trace_buffer_struct *sirq_buffers;
1932 struct trace_buffer_struct *irq_buffers;
1933 struct trace_buffer_struct *nmi_buffers;
1934
1935 buffers = alloc_percpu(struct trace_buffer_struct);
1936 if (!buffers)
1937 goto err_warn;
1938
1939 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1940 if (!sirq_buffers)
1941 goto err_sirq;
1942
1943 irq_buffers = alloc_percpu(struct trace_buffer_struct);
1944 if (!irq_buffers)
1945 goto err_irq;
1946
1947 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
1948 if (!nmi_buffers)
1949 goto err_nmi;
1950
1951 trace_percpu_buffer = buffers;
1952 trace_percpu_sirq_buffer = sirq_buffers;
1953 trace_percpu_irq_buffer = irq_buffers;
1954 trace_percpu_nmi_buffer = nmi_buffers;
1955
1956 return 0;
1957
1958 err_nmi:
1959 free_percpu(irq_buffers);
1960 err_irq:
1961 free_percpu(sirq_buffers);
1962 err_sirq:
1963 free_percpu(buffers);
1964 err_warn:
1965 WARN(1, "Could not allocate percpu trace_printk buffer");
1966 return -ENOMEM;
1967}
1968
Steven Rostedt81698832012-10-11 10:15:05 -04001969static int buffers_allocated;
1970
Steven Rostedt07d777f2011-09-22 14:01:55 -04001971void trace_printk_init_buffers(void)
1972{
Steven Rostedt07d777f2011-09-22 14:01:55 -04001973 if (buffers_allocated)
1974 return;
1975
1976 if (alloc_percpu_trace_buffer())
1977 return;
1978
Steven Rostedt2184db42014-05-28 13:14:40 -04001979 /* trace_printk() is for debug use only. Don't use it in production. */
1980
1981 pr_warning("\n**********************************************************\n");
1982 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
1983 pr_warning("** **\n");
1984 pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
1985 pr_warning("** **\n");
1986 pr_warning("** This means that this is a DEBUG kernel and it is **\n");
 1987	 pr_warning("** unsafe for production use.                           **\n");
1988 pr_warning("** **\n");
1989 pr_warning("** If you see this message and you are not debugging **\n");
1990 pr_warning("** the kernel, report this immediately to your vendor! **\n");
1991 pr_warning("** **\n");
1992 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
1993 pr_warning("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04001994
Steven Rostedtb382ede62012-10-10 21:44:34 -04001995 /* Expand the buffers to set size */
1996 tracing_update_buffers();
1997
Steven Rostedt07d777f2011-09-22 14:01:55 -04001998 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04001999
2000 /*
2001 * trace_printk_init_buffers() can be called by modules.
2002 * If that happens, then we need to start cmdline recording
2003 * directly here. If the global_trace.buffer is already
2004 * allocated here, then this was called by module code.
2005 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002006 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04002007 tracing_start_cmdline_record();
2008}
2009
2010void trace_printk_start_comm(void)
2011{
2012 /* Start tracing comms if trace printk is set */
2013 if (!buffers_allocated)
2014 return;
2015 tracing_start_cmdline_record();
2016}
2017
2018static void trace_printk_start_stop_comm(int enabled)
2019{
2020 if (!buffers_allocated)
2021 return;
2022
2023 if (enabled)
2024 tracing_start_cmdline_record();
2025 else
2026 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002027}
2028
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002029/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002030 * trace_vbprintk - write binary msg to tracing buffer
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002031 *
2032 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04002033int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002034{
Tom Zanussie1112b42009-03-31 00:48:49 -05002035 struct ftrace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002036 struct ring_buffer_event *event;
Steven Rostedte77405a2009-09-02 14:17:06 -04002037 struct ring_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002038 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002039 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002040 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002041 char *tbuffer;
2042 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002043
2044 if (unlikely(tracing_selftest_running || tracing_disabled))
2045 return 0;
2046
2047 /* Don't pollute graph traces with trace_vprintk internals */
2048 pause_graph_tracing();
2049
2050 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04002051 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002052
Steven Rostedt07d777f2011-09-22 14:01:55 -04002053 tbuffer = get_trace_buf();
2054 if (!tbuffer) {
2055 len = 0;
2056 goto out;
2057 }
2058
2059 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2060
2061 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002062 goto out;
2063
Steven Rostedt07d777f2011-09-22 14:01:55 -04002064 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002065 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002066 buffer = tr->trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04002067 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2068 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002069 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002070 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002071 entry = ring_buffer_event_data(event);
2072 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002073 entry->fmt = fmt;
2074
Steven Rostedt07d777f2011-09-22 14:01:55 -04002075 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05002076 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002077 __buffer_unlock_commit(buffer, event);
Steven Rostedtd9313692010-01-06 17:27:11 -05002078 ftrace_trace_stack(buffer, flags, 6, pc);
2079 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002080
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002081out:
Steven Rostedt5168ae52010-06-03 09:36:50 -04002082 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002083 unpause_graph_tracing();
2084
2085 return len;
2086}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002087EXPORT_SYMBOL_GPL(trace_vbprintk);
2088
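/*
 * Illustrative usage (not part of this file): trace_vbprintk() is the
 * backend of trace_printk() for constant format strings. Only the
 * binary arguments are copied into the ring buffer; the fmt pointer
 * is dereferenced at read time.
 *
 *	trace_printk("queue %d depth %lu\n", qid, depth);
 */
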
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002089static int
2090__trace_array_vprintk(struct ring_buffer *buffer,
2091 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002092{
Tom Zanussie1112b42009-03-31 00:48:49 -05002093 struct ftrace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002094 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002095 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002096 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002097 unsigned long flags;
2098 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002099
2100 if (tracing_disabled || tracing_selftest_running)
2101 return 0;
2102
Steven Rostedt07d777f2011-09-22 14:01:55 -04002103 /* Don't pollute graph traces with trace_vprintk internals */
2104 pause_graph_tracing();
2105
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002106 pc = preempt_count();
2107 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002108
Steven Rostedt07d777f2011-09-22 14:01:55 -04002109
2110 tbuffer = get_trace_buf();
2111 if (!tbuffer) {
2112 len = 0;
2113 goto out;
2114 }
2115
2116 len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2117 if (len > TRACE_BUF_SIZE)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002118 goto out;
2119
Steven Rostedt07d777f2011-09-22 14:01:55 -04002120 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002121 size = sizeof(*entry) + len + 1;
Steven Rostedte77405a2009-09-02 14:17:06 -04002122 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
Steven Rostedt07d777f2011-09-22 14:01:55 -04002123 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002124 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002125 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002126 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002127 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002128
Steven Rostedt07d777f2011-09-22 14:01:55 -04002129 memcpy(&entry->buf, tbuffer, len);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002130 entry->buf[len] = '\0';
Tom Zanussif306cc82013-10-24 08:34:17 -05002131 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002132 __buffer_unlock_commit(buffer, event);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002133 ftrace_trace_stack(buffer, flags, 6, pc);
Steven Rostedtd9313692010-01-06 17:27:11 -05002134 }
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002135 out:
2136 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002137 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002138
2139 return len;
2140}
Steven Rostedt659372d2009-09-03 19:11:07 -04002141
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002142int trace_array_vprintk(struct trace_array *tr,
2143 unsigned long ip, const char *fmt, va_list args)
2144{
2145 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2146}
2147
2148int trace_array_printk(struct trace_array *tr,
2149 unsigned long ip, const char *fmt, ...)
2150{
2151 int ret;
2152 va_list ap;
2153
2154 if (!(trace_flags & TRACE_ITER_PRINTK))
2155 return 0;
2156
2157 va_start(ap, fmt);
2158 ret = trace_array_vprintk(tr, ip, fmt, ap);
2159 va_end(ap);
2160 return ret;
2161}
2162
2163int trace_array_printk_buf(struct ring_buffer *buffer,
2164 unsigned long ip, const char *fmt, ...)
2165{
2166 int ret;
2167 va_list ap;
2168
2169 if (!(trace_flags & TRACE_ITER_PRINTK))
2170 return 0;
2171
2172 va_start(ap, fmt);
2173 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2174 va_end(ap);
2175 return ret;
2176}
2177
Steven Rostedt659372d2009-09-03 19:11:07 -04002178int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2179{
Steven Rostedta813a152009-10-09 01:41:35 -04002180 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04002181}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002182EXPORT_SYMBOL_GPL(trace_vprintk);
2183
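/*
 * Illustrative usage (not part of this file): code holding a
 * trace_array (e.g. an ftrace instance) writes into that instance's
 * buffer instead of the global one:
 *
 *	trace_array_printk(tr, _THIS_IP_, "reset on cpu %d\n", cpu);
 */
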
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002184static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04002185{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002186 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2187
Steven Rostedt5a90f572008-09-03 17:42:51 -04002188 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002189 if (buf_iter)
2190 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04002191}
2192
Ingo Molnare309b412008-05-12 21:20:51 +02002193static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002194peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2195 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002196{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002197 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002198 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002199
Steven Rostedtd7690412008-10-01 00:29:53 -04002200 if (buf_iter)
2201 event = ring_buffer_iter_peek(buf_iter, ts);
2202 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002203 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002204 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04002205
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002206 if (event) {
2207 iter->ent_size = ring_buffer_event_length(event);
2208 return ring_buffer_event_data(event);
2209 }
2210 iter->ent_size = 0;
2211 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002212}
Steven Rostedtd7690412008-10-01 00:29:53 -04002213
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002214static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002215__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2216 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002217{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002218 struct ring_buffer *buffer = iter->trace_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002219 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08002220 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002221 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002222 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002223 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002224 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002225 int cpu;
2226
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002227 /*
2228 * If we are in a per_cpu trace file, don't bother by iterating over
2229 * all cpu and peek directly.
2230 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002231 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002232 if (ring_buffer_empty_cpu(buffer, cpu_file))
2233 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002234 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002235 if (ent_cpu)
2236 *ent_cpu = cpu_file;
2237
2238 return ent;
2239 }
2240
Steven Rostedtab464282008-05-12 21:21:00 +02002241 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002242
2243 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002244 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002245
Steven Rostedtbc21b472010-03-31 19:49:26 -04002246 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002247
Ingo Molnarcdd31cd2008-05-12 21:20:46 +02002248 /*
2249 * Pick the entry with the smallest timestamp:
2250 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002251 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002252 next = ent;
2253 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002254 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002255 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002256 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002257 }
2258 }
2259
Steven Rostedt12b5da32012-03-27 10:43:28 -04002260 iter->ent_size = next_size;
2261
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002262 if (ent_cpu)
2263 *ent_cpu = next_cpu;
2264
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002265 if (ent_ts)
2266 *ent_ts = next_ts;
2267
Steven Rostedtbc21b472010-03-31 19:49:26 -04002268 if (missing_events)
2269 *missing_events = next_lost;
2270
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002271 return next;
2272}
2273
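/*
 * Illustrative sketch (not part of this file): __find_next_entry() is
 * an N-way merge. Peeking the head of every per-cpu buffer and taking
 * the smallest timestamp yields one time-ordered stream; with cpu0's
 * head at t=1000 and cpu1's at t=950, cpu1's event is returned and
 * only cpu1 advances on the next trace_find_next_entry_inc(). Ties go
 * to the lowest-numbered cpu, as the compare is strict:
 *
 *	for_each_tracing_cpu(cpu)
 *		if (ent && (!next || ts < next_ts))
 *			next = ent, next_cpu = cpu, next_ts = ts;
 */
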
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002274/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002275struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2276 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002277{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002278 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002279}
Ingo Molnar8c523a92008-05-12 21:20:46 +02002280
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002281/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05002282void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002283{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002284 iter->ent = __find_next_entry(iter, &iter->cpu,
2285 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02002286
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002287 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002288 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002289
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002290 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02002291}
2292
Ingo Molnare309b412008-05-12 21:20:51 +02002293static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002294{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002295 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002296 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002297}
2298
Ingo Molnare309b412008-05-12 21:20:51 +02002299static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002300{
2301 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002302 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002303 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002304
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002305 WARN_ON_ONCE(iter->leftover);
2306
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002307 (*pos)++;
2308
2309 /* can't go backwards */
2310 if (iter->idx > i)
2311 return NULL;
2312
2313 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05002314 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002315 else
2316 ent = iter;
2317
2318 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05002319 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002320
2321 iter->pos = *pos;
2322
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002323 return ent;
2324}
2325
Jason Wessel955b61e2010-08-05 09:22:23 -05002326void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002327{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002328 struct ring_buffer_event *event;
2329 struct ring_buffer_iter *buf_iter;
2330 unsigned long entries = 0;
2331 u64 ts;
2332
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002333 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002334
Steven Rostedt6d158a82012-06-27 20:46:14 -04002335 buf_iter = trace_buffer_iter(iter, cpu);
2336 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002337 return;
2338
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002339 ring_buffer_iter_reset(buf_iter);
2340
2341 /*
 2342	 * With the max latency tracers we can have the case that a
 2343	 * reset never took place on a cpu. This shows up as the
 2344	 * timestamp being before the start of the buffer.
2345 */
2346 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002347 if (ts >= iter->trace_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002348 break;
2349 entries++;
2350 ring_buffer_read(buf_iter, NULL);
2351 }
2352
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002353 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002354}
2355
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002356/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002357 * The current tracer is copied to avoid global locking
 2358 * all around.
2359 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002360static void *s_start(struct seq_file *m, loff_t *pos)
2361{
2362 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002363 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002364 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002365 void *p = NULL;
2366 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002367 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002368
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09002369 /*
2370 * copy the tracer to avoid using a global lock all around.
2371 * iter->trace is a copy of current_trace, the pointer to the
2372 * name may be used instead of a strcmp(), as iter->trace->name
2373 * will point to the same string as current_trace->name.
2374 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002375 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002376 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2377 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002378 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002379
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002380#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002381 if (iter->snapshot && iter->trace->use_max_tr)
2382 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002383#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002384
2385 if (!iter->snapshot)
2386 atomic_inc(&trace_record_cmdline_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002387
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002388 if (*pos != iter->pos) {
2389 iter->ent = NULL;
2390 iter->cpu = 0;
2391 iter->idx = -1;
2392
Steven Rostedtae3b5092013-01-23 15:22:59 -05002393 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002394 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002395 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002396 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002397 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002398
Lai Jiangshanac91d852010-03-02 17:54:50 +08002399 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002400 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2401 ;
2402
2403 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002404 /*
2405 * If we overflowed the seq_file before, then we want
2406 * to just reuse the trace_seq buffer again.
2407 */
2408 if (iter->leftover)
2409 p = iter;
2410 else {
2411 l = *pos - 1;
2412 p = s_next(m, p, &l);
2413 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002414 }
2415
Lai Jiangshan4f535962009-05-18 19:35:34 +08002416 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002417 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002418 return p;
2419}
2420
2421static void s_stop(struct seq_file *m, void *p)
2422{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002423 struct trace_iterator *iter = m->private;
2424
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002425#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002426 if (iter->snapshot && iter->trace->use_max_tr)
2427 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002428#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002429
2430 if (!iter->snapshot)
2431 atomic_dec(&trace_record_cmdline_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002432
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002433 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08002434 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002435}
2436
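/*
 * Illustrative sketch: s_start/s_next/s_stop above implement the
 * standard seq_file iterator contract for the "trace" file; they are
 * wired up (later in this file) roughly as:
 *
 *	static const struct seq_operations tracer_seq_ops = {
 *		.start	= s_start,
 *		.next	= s_next,
 *		.stop	= s_stop,
 *		.show	= s_show,
 *	};
 */
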
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002437static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002438get_total_entries(struct trace_buffer *buf,
2439 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002440{
2441 unsigned long count;
2442 int cpu;
2443
2444 *total = 0;
2445 *entries = 0;
2446
2447 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002448 count = ring_buffer_entries_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002449 /*
2450 * If this buffer has skipped entries, then we hold all
2451 * entries for the trace and we need to ignore the
2452 * ones before the time stamp.
2453 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002454 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2455 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002456 /* total is the same as the entries */
2457 *total += count;
2458 } else
2459 *total += count +
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002460 ring_buffer_overrun_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002461 *entries += count;
2462 }
2463}

static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                  _------=> CPU#            \n");
	seq_puts(m, "#                 / _-----=> irqs-off        \n");
	seq_puts(m, "#                | / _----=> need-resched    \n");
	seq_puts(m, "#                || / _---=> hardirq/softirq \n");
	seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
	seq_puts(m, "#                |||| /     delay            \n");
	seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
	seq_puts(m, "#     \\   /      |||||  \\    |   /           \n");
}

static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
{
	unsigned long total;
	unsigned long entries;

	get_total_entries(buf, &total, &entries);
	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
		   entries, total, num_online_cpus());
	seq_puts(m, "#\n");
}

static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |       |          |         |\n");
}

static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#                              _-----=> irqs-off\n");
	seq_puts(m, "#                             / _----=> need-resched\n");
	seq_puts(m, "#                            | / _---=> hardirq/softirq\n");
	seq_puts(m, "#                            || / _--=> preempt-depth\n");
	seq_puts(m, "#                            ||| /     delay\n");
	seq_puts(m, "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |       |   ||||       |         |\n");
}

void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_buffer *buf = iter->trace_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
	struct tracer *type = iter->trace;
	unsigned long entries;
	unsigned long total;
	const char *name = "preemption";

	name = type->name;

	get_total_entries(buf, &total, &entries);

	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "# -----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   buf->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "#    -----------------\n");
	seq_printf(m, "#    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid,
		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "#    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, "#  => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#  => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#\n");
	}

	seq_puts(m, "#\n");
}
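
/*
 * With illustrative values, the header built above renders roughly as:
 *
 *   # irqsoff latency trace v1.1.5 on 3.8.0
 *   # --------------------------------------------------------------------
 *   # latency: 259 us, #4/4, CPU#2 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:4)
 *   #    -----------------
 *   #    | task: ps-6143 (uid:0 nice:0 policy:0 rt_prio:0)
 *   #    -----------------
 *   #  => started at: __lock_task_sighand
 *   #  => ended at:   _raw_spin_unlock_irqrestore
 */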

static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;

	if (!(trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	if (cpumask_test_cpu(iter->cpu, iter->started))
		return;

	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
		return;

	cpumask_set_cpu(iter->cpu, iter->started);

	/* Don't print started cpu buffer for the first entry of the trace */
	if (iter->idx > 1)
		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
				 iter->cpu);
}

static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	test_cpu_buff_start(iter);

	event = ftrace_find_event(entry->type);

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
			if (!trace_print_lat_context(iter))
				goto partial;
		} else {
			if (!trace_print_context(iter))
				goto partial;
		}
	}

	if (event)
		return event->funcs->trace(iter, sym_flags, event);

	if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (!trace_seq_printf(s, "%d %d %llu ",
				      entry->pid, iter->cpu, iter->ts))
			goto partial;
	}

	event = ftrace_find_event(entry->type);
	if (event)
		return event->funcs->raw(iter, 0, event);

	if (!trace_seq_printf(s, "%d ?\n", entry->type))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
		SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
		SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
	}

	event = ftrace_find_event(entry->type);
	if (event) {
		enum print_line_t ret = event->funcs->hex(iter, 0, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	SEQ_PUT_FIELD_RET(s, newline);

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_FIELD_RET(s, entry->pid);
		SEQ_PUT_FIELD_RET(s, iter->cpu);
		SEQ_PUT_FIELD_RET(s, iter->ts);
	}

	event = ftrace_find_event(entry->type);
	return event ? event->funcs->binary(iter, 0, event) :
		TRACE_TYPE_HANDLED;
}

int trace_empty(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter;
	int cpu;

	/* If we are looking at one CPU buffer, only check that one */
	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
		cpu = iter->cpu_file;
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
		return 1;
	}

	for_each_tracing_cpu(cpu) {
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
	}

	return 1;
}
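
/*
 * Note: the buf_iter path above serves normal "trace" reads, where
 * per-cpu iterators were prepared at open time; the fallback that asks
 * the ring buffer directly covers readers such as trace_pipe, which
 * have no buffer_iter.
 */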

/* Called with trace_event_read_lock() held. */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	enum print_line_t ret;

	if (iter->lost_events &&
	    !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
			      iter->cpu, iter->lost_events))
		return TRACE_TYPE_PARTIAL_LINE;

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_BPUTS &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bputs_msg_only(iter);

	if (iter->ent->type == TRACE_BPRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}
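
/*
 * Order matters above: lost-event warnings come first, then a tracer's
 * own print_line hook, then the printk-msg-only shortcuts, and finally
 * the bin/hex/raw/default output formats chosen via trace_options.
 */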

void trace_latency_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;

	/* print nothing if the buffers are empty */
	if (trace_empty(iter))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
		print_trace_header(m, iter);

	if (!(trace_flags & TRACE_ITER_VERBOSE))
		print_lat_help_header(m);
}

void trace_default_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;
		print_trace_header(m, iter);
		if (!(trace_flags & TRACE_ITER_VERBOSE))
			print_lat_help_header(m);
	} else {
		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
			if (trace_flags & TRACE_ITER_IRQ_INFO)
				print_func_help_header_irq(iter->trace_buffer, m);
			else
				print_func_help_header(iter->trace_buffer, m);
		}
	}
}

static void test_ftrace_alive(struct seq_file *m)
{
	if (!ftrace_is_dead())
		return;
	seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
	seq_printf(m, "#          MAY BE MISSING FUNCTION EVENTS\n");
}

#ifdef CONFIG_TRACER_MAX_TRACE
static void show_snapshot_main_help(struct seq_file *m)
{
	seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
	seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
	seq_printf(m, "#                      Takes a snapshot of the main buffer.\n");
	seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n");
	seq_printf(m, "#                      (Doesn't have to be '2' works with any number that\n");
	seq_printf(m, "#                       is not a '0' or '1')\n");
}

static void show_snapshot_percpu_help(struct seq_file *m)
{
	seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
	seq_printf(m, "#                      Takes a snapshot of the main buffer for this cpu.\n");
#else
	seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
	seq_printf(m, "#                     Must use main snapshot file to allocate.\n");
#endif
	seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
	seq_printf(m, "#                      (Doesn't have to be '2' works with any number that\n");
	seq_printf(m, "#                       is not a '0' or '1')\n");
}

static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
{
	if (iter->tr->allocated_snapshot)
		seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
	else
		seq_printf(m, "#\n# * Snapshot is freed *\n#\n");

	seq_printf(m, "# Snapshot commands:\n");
	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
		show_snapshot_main_help(m);
	else
		show_snapshot_percpu_help(m);
}
#else
/* Should never be called */
static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
#endif
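
/*
 * Typical use of the snapshot file documented by the help text above
 * (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *   # echo 1 > /sys/kernel/debug/tracing/snapshot    allocate + snapshot
 *   # cat /sys/kernel/debug/tracing/snapshot         read it back
 *   # echo 0 > /sys/kernel/debug/tracing/snapshot    free the buffer
 */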

static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
			test_ftrace_alive(m);
		}
		if (iter->snapshot && trace_empty(iter))
			print_snapshot_help(m, iter);
		else if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else
			trace_default_header(m);

	} else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;

	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * Use that instead.
		 * ret is 0 if seq_file write succeeded.
		 * -1 otherwise.
		 */
		iter->leftover = ret;
	}

	return 0;
}

/*
 * Should be used after trace_array_get(), trace_types_lock
 * ensures that i_cdev was already initialized.
 */
static inline int tracing_get_cpu(struct inode *inode)
{
	if (inode->i_cdev) /* See trace_create_cpu_file() */
		return (long)inode->i_cdev - 1;
	return RING_BUFFER_ALL_CPUS;
}
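
/*
 * In other words, per-cpu files stash cpu + 1 in i_cdev when they are
 * created, so a NULL i_cdev (e.g. the top-level "trace" file) decodes
 * to RING_BUFFER_ALL_CPUS here.
 */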

static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};

static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int cpu;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
	if (!iter)
		return ERR_PTR(-ENOMEM);

	iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
				    GFP_KERNEL);
	if (!iter->buffer_iter)
		goto release;

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	*iter->trace = *tr->current_trace;

	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
		goto fail;

	iter->tr = tr;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Currently only the top directory has a snapshot */
	if (tr->current_trace->print_max || snapshot)
		iter->trace_buffer = &tr->max_buffer;
	else
#endif
		iter->trace_buffer = &tr->trace_buffer;
	iter->snapshot = snapshot;
	iter->pos = -1;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* stop the trace while dumping if we are not opening "snapshot" */
	if (!iter->snapshot)
		tracing_stop_tr(tr);

	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter->buffer_iter[cpu] =
				ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		}
		ring_buffer_read_prepare_sync();
		for_each_tracing_cpu(cpu) {
			ring_buffer_read_start(iter->buffer_iter[cpu]);
			tracing_iter_reset(iter, cpu);
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		ring_buffer_read_prepare_sync();
		ring_buffer_read_start(iter->buffer_iter[cpu]);
		tracing_iter_reset(iter, cpu);
	}

	mutex_unlock(&trace_types_lock);

	return iter;

 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
release:
	seq_release_private(inode, file);
	return ERR_PTR(-ENOMEM);
}
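
/*
 * __tracing_open() backs both "trace" and "snapshot": the callers only
 * differ in which trace_buffer the iterator walks (max_buffer for
 * snapshots) and in whether tracing is stopped for the duration of
 * the read.
 */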

int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}

bool tracing_is_disabled(void)
{
	return (tracing_disabled) ? true : false;
}

/*
 * Open and update trace_array ref count.
 * Must have the current trace_array passed to it.
 */
static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	filp->private_data = inode->i_private;

	return 0;
}

static int tracing_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m = file->private_data;
	struct trace_iterator *iter;
	int cpu;

	if (!(file->f_mode & FMODE_READ)) {
		trace_array_put(tr);
		return 0;
	}

	/* Writes do not use seq_file */
	iter = m->private;
	mutex_lock(&trace_types_lock);

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	if (!iter->snapshot)
		/* reenable tracing if it was previously enabled */
		tracing_start_tr(tr);

	__trace_array_put(tr);

	mutex_unlock(&trace_types_lock);

	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
	seq_release_private(inode, file);

	return 0;
}

static int tracing_release_generic_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);
	return 0;
}

static int tracing_single_release_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return single_release(inode, file);
}

static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* If this file was open for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		int cpu = tracing_get_cpu(inode);

		if (cpu == RING_BUFFER_ALL_CPUS)
			tracing_reset_online_cpus(&tr->trace_buffer);
		else
			tracing_reset(&tr->trace_buffer, cpu);
	}

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, false);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}

	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

/*
 * Some tracers are not suitable for instance buffers.
 * A tracer is always available for the global array (toplevel)
 * or if it explicitly states that it is.
 */
static bool
trace_ok_for_array(struct tracer *t, struct trace_array *tr)
{
	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
}

/* Find the next tracer that this trace array may use */
static struct tracer *
get_tracer_for_array(struct trace_array *tr, struct tracer *t)
{
	while (t && !trace_ok_for_array(t, tr))
		t = t->next;

	return t;
}
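
/*
 * Consequence (assuming only some tracers, e.g. "nop", set
 * ->allow_instances): available_tracers inside an instance directory
 * lists fewer tracers than the top-level one, because the walk above
 * skips everything the instance may not use.
 */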

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t = v;

	(*pos)++;

	if (t)
		t = get_tracer_for_array(tr, t->next);

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);

	t = get_tracer_for_array(tr, trace_types);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_printf(m, "%s", t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}

static const struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int show_traces_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	ret = seq_open(file, &show_traces_seq_ops);
	if (ret)
		return ret;

	m = file->private_data;
	m->private = tr;

	return 0;
}

static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}

loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
{
	int ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, whence);
	else
		file->f_pos = ret = 0;

	return ret;
}
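
/*
 * Writes to these files do not go through the seq_file iterator, so a
 * write-only open simply pins the offset to zero rather than
 * delegating to seq_lseek().
 */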

static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.write		= tracing_write_stub,
	.llseek		= tracing_lseek,
	.release	= tracing_release,
};

static const struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
	.llseek		= seq_lseek,
};

/*
 * The tracer itself will not take this lock, but still we want
 * to provide a consistent cpumask to user-space:
 */
static DEFINE_MUTEX(tracing_cpumask_update_lock);

/*
 * Temporary storage for the character representation of the
 * CPU bitmask (and one more byte for the newline):
 */
static char mask_str[NR_CPUS + 1];

static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	int len;

	mutex_lock(&tracing_cpumask_update_lock);

	len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
	if (count - len < 2) {
		count = -EINVAL;
		goto out_err;
	}
	len += sprintf(mask_str + len, "\n");
	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);

out_err:
	mutex_unlock(&tracing_cpumask_update_lock);

	return count;
}

static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	cpumask_var_t tracing_cpumask_new;
	int err, cpu;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	mutex_lock(&tracing_cpumask_update_lock);

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
		}
	}
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);

	mutex_unlock(&tracing_cpumask_update_lock);
	free_cpumask_var(tracing_cpumask_new);

	return count;

err_unlock:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}
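
/*
 * The new mask arrives via cpumask_parse_user(), i.e. as a hex string,
 * so for example (assuming at least four CPUs are present):
 *
 *   # echo 3 > tracing_cpumask      trace only CPUs 0 and 1
 *   # echo f > tracing_cpumask      trace CPUs 0-3
 */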

static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
	.release	= tracing_release_generic_tr,
	.llseek		= generic_file_llseek,
};

static int tracing_trace_options_show(struct seq_file *m, void *v)
{
	struct tracer_opt *trace_opts;
	struct trace_array *tr = m->private;
	u32 tracer_flags;
	int i;

	mutex_lock(&trace_types_lock);
	tracer_flags = tr->current_trace->flags->val;
	trace_opts = tr->current_trace->flags->opts;

	for (i = 0; trace_options[i]; i++) {
		if (trace_flags & (1 << i))
			seq_printf(m, "%s\n", trace_options[i]);
		else
			seq_printf(m, "no%s\n", trace_options[i]);
	}

	for (i = 0; trace_opts[i].name; i++) {
		if (tracer_flags & trace_opts[i].bit)
			seq_printf(m, "%s\n", trace_opts[i].name);
		else
			seq_printf(m, "no%s\n", trace_opts[i].name);
	}
	mutex_unlock(&trace_types_lock);

	return 0;
}

static int __set_tracer_option(struct trace_array *tr,
			       struct tracer_flags *tracer_flags,
			       struct tracer_opt *opts, int neg)
{
	struct tracer *trace = tr->current_trace;
	int ret;

	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
	if (ret)
		return ret;

	if (neg)
		tracer_flags->val &= ~opts->bit;
	else
		tracer_flags->val |= opts->bit;
	return 0;
}

/* Try to assign a tracer specific option */
static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
{
	struct tracer *trace = tr->current_trace;
	struct tracer_flags *tracer_flags = trace->flags;
	struct tracer_opt *opts = NULL;
	int i;

	for (i = 0; tracer_flags->opts[i].name; i++) {
		opts = &tracer_flags->opts[i];

		if (strcmp(cmp, opts->name) == 0)
			return __set_tracer_option(tr, trace->flags, opts, neg);
	}

	return -EINVAL;
}

/* Some tracers require overwrite to stay enabled */
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
{
	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
		return -1;

	return 0;
}

int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
	/* do nothing if flag is already set */
	if (!!(trace_flags & mask) == !!enabled)
		return 0;

	/* Give the tracer a chance to approve the change */
	if (tr->current_trace->flag_changed)
		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
			return -EINVAL;

	if (enabled)
		trace_flags |= mask;
	else
		trace_flags &= ~mask;

	if (mask == TRACE_ITER_RECORD_CMD)
		trace_event_enable_cmd_record(enabled);

	if (mask == TRACE_ITER_OVERWRITE) {
		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
#ifdef CONFIG_TRACER_MAX_TRACE
		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
#endif
	}

	if (mask == TRACE_ITER_PRINTK)
		trace_printk_start_stop_comm(enabled);

	return 0;
}
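
/*
 * All flag flips funnel through here so the side effects stay in one
 * place: the tracer can veto the change, overwrite mode is mirrored
 * into the ring buffer(s), and the record-cmd/printk options get
 * their enable/disable hooks called.
 */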

static int trace_set_options(struct trace_array *tr, char *option)
{
	char *cmp;
	int neg = 0;
	int ret = -ENODEV;
	int i;

	cmp = strstrip(option);

	if (strncmp(cmp, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	mutex_lock(&trace_types_lock);

	for (i = 0; trace_options[i]; i++) {
		if (strcmp(cmp, trace_options[i]) == 0) {
			ret = set_tracer_flag(tr, 1 << i, !neg);
			break;
		}
	}

	/* If no option could be set, test the specific tracer options */
	if (!trace_options[i])
		ret = set_tracer_option(tr, cmp, neg);

	mutex_unlock(&trace_types_lock);

	return ret;
}
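
/*
 * trace_set_options() takes a single option name, optionally prefixed
 * with "no" to clear it (e.g. "overwrite" vs "nooverwrite"); generic
 * trace_options entries are tried first, then the current tracer's
 * private flags.
 */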

static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
			    size_t cnt, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = trace_set_options(tr, buf);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int tracing_trace_options_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	ret = single_open(file, tracing_trace_options_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static const struct file_operations tracing_iter_fops = {
	.open		= tracing_trace_options_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_trace_options_write,
};

static const char readme_msg[] =
	"tracing mini-HOWTO:\n\n"
	"# echo 0 > tracing_on : quick way to disable tracing\n"
	"# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
	" Important files:\n"
	"  trace\t\t\t- The static contents of the buffer\n"
	"\t\t\t  To clear the buffer write into this file: echo > trace\n"
	"  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
	"  current_tracer\t- function and latency tracers\n"
	"  available_tracers\t- list of configured tracers for current_tracer\n"
	"  buffer_size_kb\t- view and modify size of per cpu buffer\n"
	"  buffer_total_size_kb  - view total size of all cpu buffers\n\n"
	"  trace_clock\t\t-change the clock used to order events\n"
	"       local:   Per cpu clock but may not be synced across CPUs\n"
	"      global:   Synced across CPUs but slows tracing down.\n"
	"     counter:   Not a clock, but just an increment\n"
	"      uptime:   Jiffy counter from time of boot\n"
	"        perf:   Same clock that perf events use\n"
#ifdef CONFIG_X86_64
	"     x86-tsc:   TSC cycle counter\n"
#endif
	"\n  trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
	"  tracing_cpumask\t- Limit which CPUs to trace\n"
	"  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
	"\t\t\t  Remove sub-buffer with rmdir\n"
	"  trace_options\t\t- Set format or modify how tracing happens\n"
	"\t\t\t  Disable an option by adding a suffix 'no' to the\n"
	"\t\t\t  option name\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	"\n  available_filter_functions - list of functions that can be filtered on\n"
	"  set_ftrace_filter\t- echo function name in here to only trace these\n"
	"\t\t\t  functions\n"
	"\t     accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
	"\t     modules: Can select a group via module\n"
	"\t      Format: :mod:<module-name>\n"
	"\t     example: echo :mod:ext3 > set_ftrace_filter\n"
	"\t    triggers: a command to perform when function is hit\n"
	"\t      Format: <function>:<trigger>[:count]\n"
	"\t     trigger: traceon, traceoff\n"
	"\t\t      enable_event:<system>:<event>\n"
	"\t\t      disable_event:<system>:<event>\n"
#ifdef CONFIG_STACKTRACE
	"\t\t      stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t      snapshot\n"
#endif
	"\t\t      dump\n"
	"\t\t      cpudump\n"
	"\t     example: echo do_fault:traceoff > set_ftrace_filter\n"
	"\t              echo do_trap:traceoff:3 > set_ftrace_filter\n"
	"\t     The first one will disable tracing every time do_fault is hit\n"
	"\t     The second will disable tracing at most 3 times when do_trap is hit\n"
	"\t       The first time do trap is hit and it disables tracing, the\n"
	"\t       counter will decrement to 2. If tracing is already disabled,\n"
	"\t       the counter will not decrement. It only decrements when the\n"
	"\t       trigger did work\n"
	"\t     To remove trigger without count:\n"
	"\t       echo '!<function>:<trigger> > set_ftrace_filter\n"
	"\t     To remove trigger with a count:\n"
	"\t       echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
	"  set_ftrace_notrace\t- echo function name in here to never trace.\n"
	"\t    accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
	"\t    modules: Can select a group via module command :mod:\n"
	"\t    Does not accept triggers\n"
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_TRACER
	"  set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
	"\t\t    (function)\n"
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	"  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
	"  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\n  snapshot\t\t- Like 'trace' but shows the content of the static\n"
	"\t\t\t  snapshot buffer. Read the contents for more\n"
	"\t\t\t  information\n"
#endif
#ifdef CONFIG_STACK_TRACER
	"  stack_trace\t\t- Shows the max stack trace when active\n"
	"  stack_max_size\t- Shows current max stack size that was traced\n"
	"\t\t\t  Write into this file to reset the max size (trigger a\n"
	"\t\t\t  new trace)\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	"  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
	"\t\t\t  traces\n"
#endif
#endif /* CONFIG_STACK_TRACER */
	"  events/\t\t- Directory containing all trace event subsystems:\n"
	"      enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
	"  events/<system>/\t- Directory containing all trace events for <system>:\n"
	"      enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
	"\t\t\t  events\n"
	"      filter\t\t- If set, only events passing filter are traced\n"
	"  events/<system>/<event>/\t- Directory containing control files for\n"
	"\t\t\t  <event>:\n"
	"      enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
	"      filter\t\t- If set, only events passing filter are traced\n"
	"      trigger\t\t- If set, a command to perform when event is hit\n"
	"\t    Format: <trigger>[:count][if <filter>]\n"
	"\t   trigger: traceon, traceoff\n"
	"\t            enable_event:<system>:<event>\n"
	"\t            disable_event:<system>:<event>\n"
#ifdef CONFIG_STACKTRACE
	"\t\t    stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t    snapshot\n"
#endif
	"\t   example: echo traceoff > events/block/block_unplug/trigger\n"
	"\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
3674 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3675 "\t events/block/block_unplug/trigger\n"
3676 "\t The first disables tracing every time block_unplug is hit.\n"
3677 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3678 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3679 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3680 "\t Like function triggers, the counter is only decremented if it\n"
3681 "\t enabled or disabled tracing.\n"
3682 "\t To remove a trigger without a count:\n"
3683 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3684 "\t To remove a trigger with a count:\n"
3685 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3686 "\t Filters can be ignored when removing a trigger.\n"
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003687;
3688
static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
					readme_msg, strlen(readme_msg));
}

static const struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
	.llseek		= generic_file_llseek,
};

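/*
 * Example (assuming tracefs/debugfs is mounted in the usual place;
 * the mount point may differ):
 *
 *	# cat /sys/kernel/debug/tracing/README
 *
 * The text is served straight from readme_msg above through
 * simple_read_from_buffer(), so partial and repeated reads behave
 * like reads of an ordinary file.
 */
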
static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned int *ptr = v;

	if (*pos || m->count)
		ptr++;

	(*pos)++;

	for (; ptr < &map_cmdline_to_pid[SAVED_CMDLINES]; ptr++) {
		if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
			continue;

		return ptr;
	}

	return NULL;
}

static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
{
	void *v;
	loff_t l = 0;

	v = &map_cmdline_to_pid[0];
	while (l <= *pos) {
		v = saved_cmdlines_next(m, v, &l);
		if (!v)
			return NULL;
	}

	return v;
}

static void saved_cmdlines_stop(struct seq_file *m, void *v)
{
}

static int saved_cmdlines_show(struct seq_file *m, void *v)
{
	char buf[TASK_COMM_LEN];
	unsigned int *pid = v;

	trace_find_cmdline(*pid, buf);
	seq_printf(m, "%d %s\n", *pid, buf);
	return 0;
}

static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
	.start		= saved_cmdlines_start,
	.next		= saved_cmdlines_next,
	.stop		= saved_cmdlines_stop,
	.show		= saved_cmdlines_show,
};

static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
}

static const struct file_operations tracing_saved_cmdlines_fops = {
	.open		= tracing_saved_cmdlines_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

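/*
 * Sketch of the output, following the "%d %s\n" format used by
 * saved_cmdlines_show() above (the actual pids and comms will of
 * course differ):
 *
 *	# cat saved_cmdlines
 *	29 khungtaskd
 *	1771 bash
 */
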
static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+2];
	int r;

	mutex_lock(&trace_types_lock);
	r = sprintf(buf, "%s\n", tr->current_trace->name);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

int tracer_init(struct tracer *t, struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
	return t->init(tr);
}

static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
{
	int cpu;

	for_each_tracing_cpu(cpu)
		per_cpu_ptr(buf->data, cpu)->entries = val;
}

#ifdef CONFIG_TRACER_MAX_TRACE
/* resize @trace_buf's buffer to the size of @size_buf's entries */
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id)
{
	int cpu, ret = 0;

	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
			if (ret < 0)
				break;
			per_cpu_ptr(trace_buf->data, cpu)->entries =
				per_cpu_ptr(size_buf->data, cpu)->entries;
		}
	} else {
		ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
		if (ret == 0)
			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
				per_cpu_ptr(size_buf->data, cpu_id)->entries;
	}

	return ret;
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int __tracing_resize_ring_buffer(struct trace_array *tr,
					unsigned long size, int cpu)
{
	int ret;

	/*
	 * If kernel or user changes the size of the ring buffer
	 * we use the size that was given, and we can forget about
	 * expanding it later.
	 */
	ring_buffer_expanded = true;

	/* May be called before buffers are initialized */
	if (!tr->trace_buffer.buffer)
		return 0;

	ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
	if (ret < 0)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
	    !tr->current_trace->use_max_tr)
		goto out;

	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
	if (ret < 0) {
		int r = resize_buffer_duplicate_size(&tr->trace_buffer,
						     &tr->trace_buffer, cpu);
		if (r < 0) {
			/*
			 * AARGH! We are left with different sized max
			 * buffers!!!!
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the
			 * latency tracers), it swaps the max buffer
			 * with the saved snapshot. We succeeded in
			 * updating the size of the main buffer, but
			 * failed to update the size of the max buffer.
			 * And when we tried to reset the main buffer to
			 * the original size, we failed there too. This
			 * is very unlikely to happen, but if it does,
			 * warn and kill all tracing.
			 */
			WARN_ON(1);
			tracing_disabled = 1;
		}
		return ret;
	}

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->max_buffer, size);
	else
		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;

 out:
#endif /* CONFIG_TRACER_MAX_TRACE */

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->trace_buffer, size);
	else
		per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;

	return ret;
}

static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
					  unsigned long size, int cpu_id)
{
	int ret = size;

	mutex_lock(&trace_types_lock);

	if (cpu_id != RING_BUFFER_ALL_CPUS) {
		/* make sure this cpu is enabled in the mask */
		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
			ret = -EINVAL;
			goto out;
		}
	}

	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
	if (ret < 0)
		ret = -ENOMEM;

out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

/**
 * tracing_update_buffers - used by tracing facility to expand ring buffers
 *
 * To save memory when tracing is never used on a system that has it
 * configured in, the ring buffers start at a minimum size. Once a
 * user starts to use the tracing facility, the buffers need to grow
 * to their default size.
 *
 * This function is to be called when a tracer is about to be used.
 */
int tracing_update_buffers(void)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded)
		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
	mutex_unlock(&trace_types_lock);

	return ret;
}

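/*
 * Minimal usage sketch (hypothetical caller): code about to enable a
 * tracer or event first brings the buffers up to their full size:
 *
 *	ret = tracing_update_buffers();
 *	if (ret < 0)
 *		return ret;
 *
 * After this the ring buffers are at their default (or user-set) size.
 */
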
struct trace_option_dentry;

static struct trace_option_dentry *
create_trace_option_files(struct trace_array *tr, struct tracer *tracer);

static void
destroy_trace_option_files(struct trace_option_dentry *topts);

/*
 * Used to clear out the tracer before deletion of an instance.
 * Must have trace_types_lock held.
 */
static void tracing_set_nop(struct trace_array *tr)
{
	if (tr->current_trace == &nop_trace)
		return;

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	tr->current_trace = &nop_trace;
}

static int tracing_set_tracer(struct trace_array *tr, const char *buf)
{
	static struct trace_option_dentry *topts;
	struct tracer *t;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool had_max_tr;
#endif
	int ret = 0;

	mutex_lock(&trace_types_lock);

	if (!ring_buffer_expanded) {
		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			goto out;
		ret = 0;
	}

	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (t == tr->current_trace)
		goto out;

	/* Some tracers are only allowed for the top level buffer */
	if (!trace_ok_for_array(t, tr)) {
		ret = -EINVAL;
		goto out;
	}

	trace_branch_disable();

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	/* Current trace needs to be nop_trace before synchronize_sched() */
	tr->current_trace = &nop_trace;

#ifdef CONFIG_TRACER_MAX_TRACE
	had_max_tr = tr->allocated_snapshot;

	if (had_max_tr && !t->use_max_tr) {
		/*
		 * We need to make sure that update_max_tr() sees that
		 * current_trace changed to nop_trace, to keep it from
		 * swapping the buffers after we resize them.
		 * update_max_tr() is called with interrupts disabled,
		 * so a synchronize_sched() is sufficient.
		 */
		synchronize_sched();
		free_snapshot(tr);
	}
#endif
	/* Currently, only the top instance has options */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
		destroy_trace_option_files(topts);
		topts = create_trace_option_files(tr, t);
	}

#ifdef CONFIG_TRACER_MAX_TRACE
	if (t->use_max_tr && !had_max_tr) {
		ret = alloc_snapshot(tr);
		if (ret < 0)
			goto out;
	}
#endif

	if (t->init) {
		ret = tracer_init(t, tr);
		if (ret)
			goto out;
	}

	tr->current_trace = t;
	tr->current_trace->enabled++;
	trace_branch_enable(tr);
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

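/*
 * tracing_set_tracer() is what backs writes to "current_tracer".
 * For example (tracer availability depends on the kernel config):
 *
 *	# echo function > current_tracer
 *
 * selects the function tracer, first growing the ring buffer to its
 * full size if needed, and allocating the snapshot buffer when the
 * new tracer sets ->use_max_tr.
 */
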
static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+1];
	int i;
	size_t ret;
	int err;

	ret = cnt;

	if (cnt > MAX_TRACER_SIZE)
		cnt = MAX_TRACER_SIZE;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip trailing whitespace */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	err = tracing_set_tracer(tr, buf);
	if (err)
		return err;

	*ppos += ret;

	return ret;
}

static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n",
		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	*ptr = val * 1000;

	return cnt;
}

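/*
 * Note the unit conversion in the two handlers above: the file
 * (tracing_max_latency in the top-level tracing directory) is read
 * and written in microseconds, while the variable itself holds
 * nanoseconds. So, assuming a latency tracer is active:
 *
 *	# echo 100 > tracing_max_latency
 *
 * stores 100000 in *ptr, and reading the file back prints 100.
 */
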
static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	mutex_lock(&trace_types_lock);

	/* create a buffer to store the information to pass to userspace */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		ret = -ENOMEM;
		__trace_array_put(tr);
		goto out;
	}

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace) {
		ret = -ENOMEM;
		goto fail;
	}
	*iter->trace = *tr->current_trace;

	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto fail;
	}

	/* trace pipe does not show start of buffer */
	cpumask_setall(iter->started);

	if (trace_flags & TRACE_ITER_LATENCY_FMT)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	iter->tr = tr;
	iter->trace_buffer = &tr->trace_buffer;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);
	filp->private_data = iter;

	if (iter->trace->pipe_open)
		iter->trace->pipe_open(iter);

	nonseekable_open(inode, filp);
out:
	mutex_unlock(&trace_types_lock);
	return ret;

fail:
	kfree(iter->trace);
	kfree(iter);
	__trace_array_put(tr);
	mutex_unlock(&trace_types_lock);
	return ret;
}

static int tracing_release_pipe(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter = file->private_data;
	struct trace_array *tr = inode->i_private;

	mutex_lock(&trace_types_lock);

	if (iter->trace->pipe_close)
		iter->trace->pipe_close(iter);

	mutex_unlock(&trace_types_lock);

	free_cpumask_var(iter->started);
	mutex_destroy(&iter->mutex);
	kfree(iter->trace);
	kfree(iter);

	trace_array_put(tr);

	return 0;
}

static unsigned int
trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return POLLIN | POLLRDNORM;

	if (trace_flags & TRACE_ITER_BLOCK)
		/*
		 * Always select as readable when in blocking mode
		 */
		return POLLIN | POLLRDNORM;
	else
		return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
					     filp, poll_table);
}

static unsigned int
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
	struct trace_iterator *iter = filp->private_data;

	return trace_poll(iter, filp, poll_table);
}

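/*
 * Userspace sketch (hypothetical, error handling omitted) of waiting
 * on trace_pipe with the poll support above:
 *
 *	struct pollfd pfd = {
 *		.fd     = open("trace_pipe", O_RDONLY),
 *		.events = POLLIN | POLLRDNORM,
 *	};
 *
 *	while (poll(&pfd, 1, -1) > 0)
 *		n = read(pfd.fd, buf, sizeof(buf));
 *
 * Note that with the "block" trace option set, poll() always reports
 * the file readable, so the subsequent read() may block.
 */
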
/* Must be called with trace_types_lock mutex held. */
static int tracing_wait_pipe(struct file *filp)
{
	struct trace_iterator *iter = filp->private_data;

	while (trace_empty(iter)) {

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		/*
		 * We should only give an EOF once we have read
		 * something and tracing has since been disabled. If
		 * tracing is disabled but we have never read anything,
		 * keep blocking: this allows a user to cat this file,
		 * and then enable tracing. After we have read
		 * something, we give an EOF when tracing is disabled
		 * again.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracing_is_on() && iter->pos)
			break;

		mutex_unlock(&iter->mutex);

		wait_on_pipe(iter);

		mutex_lock(&iter->mutex);

		if (signal_pending(current))
			return -EINTR;
	}

	return 1;
}

/*
 * Consumer reader.
 */
static ssize_t
tracing_read_pipe(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_iterator *iter = filp->private_data;
	struct trace_array *tr = iter->tr;
	ssize_t sret;

	/* return any leftover data */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (sret != -EBUSY)
		return sret;

	trace_seq_init(&iter->seq);

	/* copy the tracer to avoid using a global lock all around */
	mutex_lock(&trace_types_lock);
	if (unlikely(iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

	/*
	 * Avoid more than one consumer on a single file descriptor.
	 * This is just a matter of trace coherency; the ring buffer
	 * itself is protected.
	 */
	mutex_lock(&iter->mutex);
	if (iter->trace->read) {
		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
		if (sret)
			goto out;
	}

waitagain:
	sret = tracing_wait_pipe(filp);
	if (sret <= 0)
		goto out;

	/* stop when tracing is finished */
	if (trace_empty(iter)) {
		sret = 0;
		goto out;
	}

	if (cnt >= PAGE_SIZE)
		cnt = PAGE_SIZE - 1;

	/* reset all but tr, trace, and overruns */
	memset(&iter->seq, 0,
	       sizeof(struct trace_iterator) -
	       offsetof(struct trace_iterator, seq));
	cpumask_clear(iter->started);
	iter->pos = -1;

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);
	while (trace_find_next_entry_inc(iter) != NULL) {
		enum print_line_t ret;
		int len = iter->seq.len;

		ret = print_trace_line(iter);
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			/* don't print partial lines */
			iter->seq.len = len;
			break;
		}
		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);

		if (iter->seq.len >= cnt)
			break;

		/*
		 * Setting the full flag means we reached the trace_seq buffer
		 * size and we should leave by the partial output condition
		 * above. One of the trace_seq_* functions is not used properly.
		 */
		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
			  iter->ent->type);
	}
	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();

	/* Now copy what we have to the user */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (iter->seq.readpos >= iter->seq.len)
		trace_seq_init(&iter->seq);

	/*
	 * If there was nothing to send to user, in spite of consuming trace
	 * entries, go back to wait for more entries.
	 */
	if (sret == -EBUSY)
		goto waitagain;

out:
	mutex_unlock(&iter->mutex);

	return sret;
}

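/*
 * Because this read consumes what it returns, two readers of
 * trace_pipe do not see the same stream; e.g.:
 *
 *	# cat trace_pipe > log &
 *	# cat trace_pipe
 *
 * delivers each event to only one of the two readers. The iter->mutex
 * taken above only serializes consumers sharing a file descriptor.
 */
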
static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
				     unsigned int idx)
{
	__free_page(spd->pages[idx]);
}

static const struct pipe_buf_operations tracing_pipe_buf_ops = {
	.can_merge		= 0,
	.confirm		= generic_pipe_buf_confirm,
	.release		= generic_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= generic_pipe_buf_get,
};

static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
	size_t count;
	int ret;

	/* Seq buffer is page-sized, exactly what we need. */
	for (;;) {
		count = iter->seq.len;
		ret = print_trace_line(iter);
		count = iter->seq.len - count;
		if (rem < count) {
			rem = 0;
			iter->seq.len -= count;
			break;
		}
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			iter->seq.len -= count;
			break;
		}

		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);
		rem -= count;
		if (!trace_find_next_entry_inc(iter)) {
			rem = 0;
			iter->ent = NULL;
			break;
		}
	}

	return rem;
}

static ssize_t tracing_splice_read_pipe(struct file *filp,
					loff_t *ppos,
					struct pipe_inode_info *pipe,
					size_t len,
					unsigned int flags)
{
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct trace_iterator *iter = filp->private_data;
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages	= 0, /* This gets updated below. */
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &tracing_pipe_buf_ops,
		.spd_release	= tracing_spd_release_pipe,
	};
	struct trace_array *tr = iter->tr;
	ssize_t ret;
	size_t rem;
	unsigned int i;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	/* copy the tracer to avoid using a global lock all around */
	mutex_lock(&trace_types_lock);
	if (unlikely(iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

	mutex_lock(&iter->mutex);

	if (iter->trace->splice_read) {
		ret = iter->trace->splice_read(iter, filp,
					       ppos, pipe, len, flags);
		if (ret)
			goto out_err;
	}

	ret = tracing_wait_pipe(filp);
	if (ret <= 0)
		goto out_err;

	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
		ret = -EFAULT;
		goto out_err;
	}

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);

	/* Fill as many pages as possible. */
	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
		spd.pages[i] = alloc_page(GFP_KERNEL);
		if (!spd.pages[i])
			break;

		rem = tracing_fill_pipe_page(rem, iter);

		/* Copy the data into the page, so we can start over. */
		ret = trace_seq_to_buffer(&iter->seq,
					  page_address(spd.pages[i]),
					  iter->seq.len);
		if (ret < 0) {
			__free_page(spd.pages[i]);
			break;
		}
		spd.partial[i].offset = 0;
		spd.partial[i].len = iter->seq.len;

		trace_seq_init(&iter->seq);
	}

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
	mutex_unlock(&iter->mutex);

	spd.nr_pages = i;

	ret = splice_to_pipe(pipe, &spd);
out:
	splice_shrink_spd(&spd);
	return ret;

out_err:
	mutex_unlock(&iter->mutex);
	goto out;
}

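/*
 * Userspace sketch (hypothetical, error handling omitted): moving
 * trace data into a file without a bounce through a user buffer.
 * splice() needs a pipe on one side:
 *
 *	int fds[2], tfd, out;
 *
 *	pipe(fds);
 *	tfd = open("trace_pipe", O_RDONLY);
 *	out = open("trace.log", O_WRONLY | O_CREAT, 0644);
 *	for (;;) {
 *		splice(tfd, NULL, fds[1], NULL, 4096, 0);
 *		splice(fds[0], NULL, out, NULL, 4096, 0);
 *	}
 *
 * Each page handed to the pipe is filled with whole trace lines by
 * tracing_fill_pipe_page() above.
 */
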
static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	int cpu = tracing_get_cpu(inode);
	char buf[64];
	int r = 0;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

	if (cpu == RING_BUFFER_ALL_CPUS) {
		int cpu, buf_size_same;
		unsigned long size;

		size = 0;
		buf_size_same = 1;
		/* check if all cpu sizes are same */
		for_each_tracing_cpu(cpu) {
			/* fill in the size from first enabled cpu */
			if (size == 0)
				size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
			if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
				buf_size_same = 0;
				break;
			}
		}

		if (buf_size_same) {
			if (!ring_buffer_expanded)
				r = sprintf(buf, "%lu (expanded: %lu)\n",
					    size >> 10,
					    trace_buf_size >> 10);
			else
				r = sprintf(buf, "%lu\n", size >> 10);
		} else
			r = sprintf(buf, "X\n");
	} else
		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);

	mutex_unlock(&trace_types_lock);

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
	return ret;
}

static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry */
	if (!val)
		return -EINVAL;

	/* value is in KB */
	val <<= 10;
	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

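/*
 * Example: "echo 4096 > buffer_size_kb" grows each per-cpu buffer to
 * 4 MB (the written value is in KB, hence the "val <<= 10" above).
 * The same handler serves the per-cpu files, where tracing_get_cpu()
 * yields a single CPU instead of RING_BUFFER_ALL_CPUS.
 */
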
static ssize_t
tracing_total_entries_read(struct file *filp, char __user *ubuf,
			   size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r, cpu;
	unsigned long size = 0, expanded_size = 0;

	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
		size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
		if (!ring_buffer_expanded)
			expanded_size += trace_buf_size >> 10;
	}
	if (ring_buffer_expanded)
		r = sprintf(buf, "%lu\n", size);
	else
		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	/*
	 * There is no need to read what the user has written; this
	 * function only exists so that writing to the file (e.g. with
	 * "echo") does not return an error.
	 */

	*ppos += cnt;

	return cnt;
}

static int
tracing_free_buffer_release(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	/* disable tracing ? */
	if (trace_flags & TRACE_ITER_STOP_ON_FREE)
		tracer_tracing_off(tr);
	/* resize the ring buffer to 0 */
	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);

	trace_array_put(tr);

	return 0;
}

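/*
 * These two handlers back the "free_buffer" file: the write is
 * accepted but ignored (so "echo > free_buffer" succeeds), and the
 * real work happens on release, when the ring buffer is resized to
 * zero, after optionally stopping tracing first.
 */
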
static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
					size_t cnt, loff_t *fpos)
{
	unsigned long addr = (unsigned long)ubuf;
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	struct page *pages[2];
	void *map_page[2];
	int nr_pages = 1;
	ssize_t written;
	int offset;
	int size;
	int len;
	int ret;
	int i;

	if (tracing_disabled)
		return -EINVAL;

	if (!(trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	/*
	 * Userspace is injecting traces into the kernel trace buffer.
	 * We want to be as non-intrusive as possible.
	 * To do so, we do not want to allocate any special buffers
	 * or take any locks, but instead write the userspace data
	 * straight into the ring buffer.
	 *
	 * First we need to pin the userspace buffer into memory. It
	 * most likely already is, since the caller just referenced
	 * it, but there is no guarantee. By using get_user_pages_fast()
	 * and kmap_atomic()/kunmap_atomic() we can get access to the
	 * pages directly. We then write the data directly into the
	 * ring buffer.
	 */
	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	/* check if we cross pages */
	if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
		nr_pages = 2;

	offset = addr & (PAGE_SIZE - 1);
	addr &= PAGE_MASK;

	ret = get_user_pages_fast(addr, nr_pages, 0, pages);
	if (ret < nr_pages) {
		while (--ret >= 0)
			put_page(pages[ret]);
		written = -EFAULT;
		goto out;
	}

	for (i = 0; i < nr_pages; i++)
		map_page[i] = kmap_atomic(pages[i]);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt + 2; /* possible \n added */
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  irq_flags, preempt_count());
	if (!event) {
		/* Ring buffer disabled, return as if not open for write */
		written = -EBADF;
		goto out_unlock;
	}

	entry = ring_buffer_event_data(event);
	entry->ip = _THIS_IP_;

	if (nr_pages == 2) {
		len = PAGE_SIZE - offset;
		memcpy(&entry->buf, map_page[0] + offset, len);
		memcpy(&entry->buf[len], map_page[1], cnt - len);
	} else
		memcpy(&entry->buf, map_page[0] + offset, cnt);

	if (entry->buf[cnt - 1] != '\n') {
		entry->buf[cnt] = '\n';
		entry->buf[cnt + 1] = '\0';
	} else
		entry->buf[cnt] = '\0';

	__buffer_unlock_commit(buffer, event);

	written = cnt;

	*fpos += written;

 out_unlock:
	for (i = 0; i < nr_pages; i++) {
		kunmap_atomic(map_page[i]);
		put_page(pages[i]);
	}
 out:
	return written;
}

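/*
 * Example, matching the trace_marker entry in the README text above:
 *
 *	# echo hello_world > trace_marker
 *
 * injects a print entry into the ring buffer; the code above appends
 * a newline if the write did not end with one.
 */
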
static int tracing_clock_show(struct seq_file *m, void *v)
{
	struct trace_array *tr = m->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
		seq_printf(m,
			"%s%s%s%s", i ? " " : "",
			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
			i == tr->clock_id ? "]" : "");
	seq_putc(m, '\n');

	return 0;
}

static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
		if (strcmp(trace_clocks[i].name, clockstr) == 0)
			break;
	}
	if (i == ARRAY_SIZE(trace_clocks))
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	tr->clock_id = i;

	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);

	/*
	 * New clock may not be consistent with the previous clock.
	 * Reset the buffer so that it doesn't have incomparable timestamps.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
	tracing_reset_online_cpus(&tr->max_buffer);
#endif

	mutex_unlock(&trace_types_lock);

	return 0;
}

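/*
 * Handle a write to the trace_clock file: copy in and strip the user
 * string, then hand it to tracing_set_clock().
 */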
static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *fpos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	const char *clockstr;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	clockstr = strstrip(buf);

	ret = tracing_set_clock(tr, clockstr);
	if (ret)
		return ret;

	*fpos += cnt;

	return cnt;
}

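/*
 * Open trace_clock as a seq_file, taking a reference on the
 * trace_array so it cannot go away while the file is open.
 */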
static int tracing_clock_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr))
		return -ENODEV;

	ret = single_open(file, tracing_clock_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

struct ftrace_buffer_info {
	struct trace_iterator	iter;
	void			*spare;
	unsigned int		read;
};

#ifdef CONFIG_TRACER_SNAPSHOT
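/*
 * Open the snapshot file.  Readers get a full trace iterator on the
 * max_buffer; writers only need a stub seq_file to carry the private
 * data.
 */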
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		ret = -ENOMEM;
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
			goto out;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
			goto out;
		}
		ret = 0;

		iter->tr = tr;
		iter->trace_buffer = &tr->max_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		m->private = iter;
		file->private_data = m;
	}
out:
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

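/*
 * Writing to the snapshot file controls the snapshot buffer:
 *   0 - free the snapshot buffer
 *   1 - allocate it if needed and swap it with the live buffer
 *   * - clear the snapshot buffer without swapping
 */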
static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	switch (val) {
	case 0:
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (!tr->allocated_snapshot) {
			ret = alloc_snapshot(tr);
			if (ret < 0)
				break;
		}
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id());
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}

static int tracing_snapshot_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	int ret;

	ret = tracing_release(inode, file);

	if (file->f_mode & FMODE_READ)
		return ret;

	/* If write only, the seq_file is just a stub */
	if (m)
		kfree(m->private);
	kfree(m);

	return 0;
}

static int tracing_buffers_open(struct inode *inode, struct file *filp);
static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
				    size_t count, loff_t *ppos);
static int tracing_buffers_release(struct inode *inode, struct file *file);
static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);

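/*
 * Open snapshot_raw: like trace_pipe_raw, but reading binary pages
 * from the max_buffer instead of the live buffer.
 */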
static int snapshot_raw_open(struct inode *inode, struct file *filp)
{
	struct ftrace_buffer_info *info;
	int ret;

	ret = tracing_buffers_open(inode, filp);
	if (ret < 0)
		return ret;

	info = filp->private_data;

	if (info->iter.trace->use_max_tr) {
		tracing_buffers_release(inode, filp);
		return -EBUSY;
	}

	info->iter.snapshot = true;
	info->iter.trace_buffer = &info->iter.tr->max_buffer;

	return ret;
}

#endif /* CONFIG_TRACER_SNAPSHOT */

static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};

static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_free_buffer_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};

static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_clock_write,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};

static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */

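/*
 * Open trace_pipe_raw: set up a trace_iterator over the raw ring
 * buffer pages of one CPU (or all CPUs) of this trace_array.
 */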
static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct ftrace_buffer_info *info;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	mutex_lock(&trace_types_lock);

	info->iter.tr		= tr;
	info->iter.cpu_file	= tracing_get_cpu(inode);
	info->iter.trace	= tr->current_trace;
	info->iter.trace_buffer = &tr->trace_buffer;
	info->spare		= NULL;
	/* Force reading ring buffer for first read */
	info->read		= (unsigned int)-1;

	filp->private_data = info;

	mutex_unlock(&trace_types_lock);

	ret = nonseekable_open(inode, filp);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static unsigned int
tracing_buffers_poll(struct file *filp, poll_table *poll_table)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;

	return trace_poll(iter, filp, poll_table);
}

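/*
 * Read raw ring buffer pages.  A spare page is filled via
 * ring_buffer_read_page() and copied to user space; partial reads
 * continue from the same spare page on the next call.
 */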
static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;
	ssize_t ret;
	ssize_t size;

	if (!count)
		return 0;

	mutex_lock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
		size = -EBUSY;
		goto out_unlock;
	}
#endif

	if (!info->spare)
		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
							  iter->cpu_file);
	size = -ENOMEM;
	if (!info->spare)
		goto out_unlock;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

 again:
	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
				    &info->spare,
				    count,
				    iter->cpu_file, 0);
	trace_access_unlock(iter->cpu_file);

	if (ret < 0) {
		if (trace_empty(iter)) {
			if ((filp->f_flags & O_NONBLOCK)) {
				size = -EAGAIN;
				goto out_unlock;
			}
			mutex_unlock(&trace_types_lock);
			wait_on_pipe(iter);
			mutex_lock(&trace_types_lock);
			if (signal_pending(current)) {
				size = -EINTR;
				goto out_unlock;
			}
			goto again;
		}
		size = 0;
		goto out_unlock;
	}

	info->read = 0;
 read:
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size) {
		size = -EFAULT;
		goto out_unlock;
	}
	size -= ret;

	*ppos += size;
	info->read += size;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return size;
}

static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	__trace_array_put(iter->tr);

	if (info->spare)
		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
	kfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}

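/*
 * A buffer_ref carries one ring buffer page into a pipe.  The
 * refcount keeps the page alive until the last pipe buffer pointing
 * at it is released.
 */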
struct buffer_ref {
	struct ring_buffer	*buffer;
	void			*page;
	int			ref;
};

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	buf->private = 0;
}

static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ref->ref++;
}

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge		= 0,
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,
};

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we error'ed out in filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	spd->partial[i].private = 0;
}

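/*
 * Splice whole ring buffer pages into a pipe without copying the
 * trace data itself: each page is wrapped in a buffer_ref and handed
 * to splice_to_pipe().
 */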
static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, size, i;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}
#endif

	if (splice_grow_spd(pipe, &spd)) {
		ret = -ENOMEM;
		goto out;
	}

	if (*ppos & (PAGE_SIZE - 1)) {
		ret = -EINVAL;
		goto out;
	}

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE) {
			ret = -EINVAL;
			goto out;
		}
		len &= PAGE_MASK;
	}

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref)
			break;

		ref->ref = 1;
		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (!ref->page) {
			kfree(ref);
			break;
		}

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->page);
			kfree(ref);
			break;
		}

		/*
		 * zero out any left over data, this is going to
		 * user land.
		 */
		size = ring_buffer_page_len(ref->page);
		if (size < PAGE_SIZE)
			memset(ref->page + size, 0, PAGE_SIZE - size);

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
			ret = -EAGAIN;
			goto out;
		}
		mutex_unlock(&trace_types_lock);
		wait_on_pipe(iter);
		mutex_lock(&trace_types_lock);
		if (signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
	splice_shrink_spd(&spd);
out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

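/*
 * Report per-CPU ring buffer statistics (entries, overruns, bytes,
 * timestamps, dropped and read events) through the per_cpu stats file.
 */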
static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
				 t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				 ring_buffer_time_stamp(trace_buf->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);

	kfree(s);

	return count;
}

static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

#ifdef CONFIG_DYNAMIC_FTRACE

int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
	int r;

	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
	buf[r++] = '\n';

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);

	return r;
}

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
#endif /* CONFIG_DYNAMIC_FTRACE */

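/*
 * The "snapshot" ftrace function command: arm a probe so that hitting
 * the matched functions triggers tracing_snapshot(), optionally only
 * a limited number of times.
 */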
#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	tracing_snapshot();
}

static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	unsigned long *count = (long *)data;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_snapshot();
}

static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	seq_printf(m, "snapshot");

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,
};

static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,
};

static int
ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	if (ret >= 0)
		alloc_snapshot(&global_trace);

	return ret < 0 ? ret : 0;
}

static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name			= "snapshot",
	.func			= ftrace_trace_snapshot_callback,
};

static __init int register_snapshot_cmd(void)
{
	return register_ftrace_command(&ftrace_snapshot_cmd);
}
#else
static inline __init int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */

struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
{
	if (tr->dir)
		return tr->dir;

	if (!debugfs_initialized())
		return NULL;

	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		tr->dir = debugfs_create_dir("tracing", NULL);

	if (!tr->dir)
		pr_warn_once("Could not create debugfs directory 'tracing'\n");

	return tr->dir;
}

struct dentry *tracing_init_dentry(void)
{
	return tracing_init_dentry_tr(&global_trace);
}

static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
	struct dentry *d_tracer;

	if (tr->percpu_dir)
		return tr->percpu_dir;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);

	WARN_ONCE(!tr->percpu_dir,
		  "Could not create debugfs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;
}

static struct dentry *
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
		      void *data, long cpu, const struct file_operations *fops)
{
	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);

	if (ret) /* See tracing_get_cpu() */
		ret->d_inode->i_cdev = (void *)(cpu + 1);
	return ret;
}

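/*
 * Create the per_cpu/cpuN debugfs directory for @cpu, with its trace,
 * trace_pipe, trace_pipe_raw, stats, buffer_size_kb and (optionally)
 * snapshot files.
 */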
static void
tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
				tr, cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_cpu_file("trace", 0644, d_cpu,
				tr, cpu, &tracing_fops);

	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
				tr, cpu, &tracing_buffers_fops);

	trace_create_cpu_file("stats", 0444, d_cpu,
				tr, cpu, &tracing_stats_fops);

	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
				tr, cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_cpu_file("snapshot", 0644, d_cpu,
				tr, cpu, &snapshot_fops);

	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
				tr, cpu, &snapshot_raw_fops);
#endif
}

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};

static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_fops = {
	.open		= tracing_open_generic,
	.read		= trace_options_read,
	.write		= trace_options_write,
	.llseek		= generic_file_llseek,
};

static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	long index = (long)filp->private_data;
	char *buf;

	if (trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	struct trace_array *tr = &global_trace;
	long index = (long)filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open		= tracing_open_generic,
	.read		= trace_options_core_read,
	.write		= trace_options_core_write,
	.llseek		= generic_file_llseek,
};

struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = debugfs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warning("Could not create debugfs '%s' entry\n", name);

	return ret;
}

static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->options = debugfs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warning("Could not create debugfs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}

static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);
}

static struct trace_option_dentry *
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;

	if (!tracer)
		return NULL;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return NULL;

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return NULL;

	for (cnt = 0; opts[cnt].name; cnt++)
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);

	return topts;
}

static void
destroy_trace_option_files(struct trace_option_dentry *topts)
{
	int cnt;

	if (!topts)
		return;

	for (cnt = 0; topts[cnt].opt; cnt++) {
		if (topts[cnt].entry)
			debugfs_remove(topts[cnt].entry);
	}

	kfree(topts);
}

static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options, (void *)index,
				 &trace_options_core_fops);
}

static __init void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++)
		create_trace_option_core_file(tr, trace_options[i], i);
}

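/*
 * The tracing_on file: reading reports whether the ring buffer is
 * enabled; writing 0 or 1 turns recording off or on, also invoking
 * the current tracer's stop/start callbacks.
 */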
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic_tr,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};

struct dentry *trace_instance_dir;

static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);

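/*
 * Allocate the ring buffer and per-CPU data for one trace_buffer.
 * allocate_trace_buffers() does this for the main buffer and, when
 * CONFIG_TRACER_MAX_TRACE is set, for the snapshot (max) buffer too.
 */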
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006047static int
6048allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
Steven Rostedt277ba042012-08-03 16:10:49 -04006049{
6050 enum ring_buffer_flags rb_flags;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006051
6052 rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
6053
Steven Rostedt (Red Hat)dced3412014-01-14 10:19:46 -05006054 buf->tr = tr;
6055
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006056 buf->buffer = ring_buffer_alloc(size, rb_flags);
6057 if (!buf->buffer)
6058 return -ENOMEM;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006059
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006060 buf->data = alloc_percpu(struct trace_array_cpu);
6061 if (!buf->data) {
6062 ring_buffer_free(buf->buffer);
6063 return -ENOMEM;
6064 }
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006065
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006066 /* Allocate the first page for all buffers */
6067 set_buffer_entries(&tr->trace_buffer,
6068 ring_buffer_size(tr->trace_buffer.buffer, 0));
6069
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006070 return 0;
6071}

static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		ring_buffer_free(tr->trace_buffer.buffer);
		free_percpu(tr->trace_buffer.data);
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}
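
/*
 * Note (illustrative): allocate_snapshot is normally set by the
 * "alloc_snapshot" kernel command line parameter. When it is false, the
 * max/snapshot buffer is created at a token size of 1 and only expanded
 * on first use. Example boot entry, assuming the usual parser:
 *
 *	linux ... alloc_snapshot	# boot with a full-size snapshot buffer
 */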

static int new_instance_create(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = debugfs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		debugfs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	init_tracer_debugfs(tr, tr->dir);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	if (tr->trace_buffer.buffer)
		ring_buffer_free(tr->trace_buffer.buffer);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}
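
/*
 * Illustrative usage (assuming debugfs at /sys/kernel/debug): this path
 * runs when user space creates a directory under instances/, routed in
 * through instance_mkdir() below:
 *
 *	mkdir /sys/kernel/debug/tracing/instances/foo
 */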

static int instance_delete(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	ret = -EBUSY;
	if (tr->ref)
		goto out_unlock;

	list_del(&tr->list);

	tracing_set_nop(tr);
	event_trace_del_tracer(tr);
	ftrace_destroy_function_files(tr);
	debugfs_remove_recursive(tr->dir);
	free_percpu(tr->trace_buffer.data);
	ring_buffer_free(tr->trace_buffer.buffer);

	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}
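
/*
 * Illustrative usage: deletion is driven by rmdir on the instance
 * directory (see instance_rmdir() below). It fails with -EBUSY while
 * tr->ref is held, e.g. while a reader still has one of the instance's
 * files open:
 *
 *	rmdir /sys/kernel/debug/tracing/instances/foo
 */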

static int instance_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/*
	 * The inode mutex is locked, but debugfs_create_dir() will also
	 * take the mutex. As the instances directory cannot be destroyed
	 * or changed in any other way, it is safe to unlock it and let
	 * the dentry try. If two users try to make the same directory at
	 * the same time, new_instance_create() will determine the winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = new_instance_create(dentry->d_iname);

	mutex_lock(&inode->i_mutex);

	return ret;
}

static int instance_rmdir(struct inode *inode, struct dentry *dentry)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/* The caller did a dget() on dentry */
	mutex_unlock(&dentry->d_inode->i_mutex);

	/*
	 * The inode mutex is locked, but debugfs_remove_recursive() will
	 * also take the mutex. As the instances directory cannot be
	 * destroyed or changed in any other way, it is safe to unlock it
	 * and let the dentry try. If two users try to remove the same
	 * directory at the same time, instance_delete() will determine
	 * the winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = instance_delete(dentry->d_iname);

	mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
	mutex_lock(&dentry->d_inode->i_mutex);

	return ret;
}

static const struct inode_operations instance_dir_inode_operations = {
	.lookup		= simple_lookup,
	.mkdir		= instance_mkdir,
	.rmdir		= instance_rmdir,
};

static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = debugfs_create_dir("instances", d_tracer);
	if (WARN_ON(!trace_instance_dir))
		return;

	/* Hijack the dir inode operations, to allow mkdir */
	trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
}
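
/*
 * Design note (illustrative): debugfs directories do not support mkdir
 * or rmdir from user space on their own, so overriding i_op on the
 * "instances" inode is what routes plain mkdir(2)/rmdir(2) calls to
 * instance_mkdir() and instance_rmdir() above.
 */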

static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			  &tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_debugfs_percpu(tr, cpu);
}
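
/*
 * Illustrative layout (assuming debugfs at /sys/kernel/debug): after the
 * function above runs for an instance, its directory exposes the usual
 * control files, e.g.:
 *
 *	instances/foo/trace		# non-consuming reader
 *	instances/foo/trace_pipe	# consuming reader
 *	instances/foo/buffer_size_kb	# per-cpu buffer size
 *	instances/foo/tracing_on	# recording on/off switch
 */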

static __init int tracer_init_debugfs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	init_tracer_debugfs(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			  &tracing_thresh, &tracing_max_lat_fops);

	trace_create_file("README", 0444, d_tracer,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			  NULL, &tracing_saved_cmdlines_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			  &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	create_trace_options_dir(&global_trace);

	return 0;
}

static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call	= trace_panic_handler,
	.next		= NULL,
	.priority	= 150	/* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};
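
/*
 * Illustrative usage: ftrace_dump_on_oops is normally enabled from the
 * kernel command line or at run time via sysctl, e.g.:
 *
 *	linux ... ftrace_dump_on_oops		# dump all CPUs on oops
 *	linux ... ftrace_dump_on_oops=orig_cpu	# dump only the oopsing CPU
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 */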

/*
 * printk buffers are capped at 1024 bytes, and we really don't need
 * even that much here; nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define KERN_TRACE here so that there is one place to modify it if
 * we decide to change the log level at which the ftrace dump prints.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Truncate; probably should print a warning here. */
	if (s->len >= TRACE_MAX_PRINT)
		s->len = TRACE_MAX_PRINT;

	/* Should be NUL-terminated already, but be paranoid. */
	s->buffer[s->len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}

void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
	}

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUs to read the
	 * next buffer. This is a bit expensive, but is not done
	 * often. We fill all that we can read, and then release
	 * the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
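
/*
 * A minimal sketch (illustrative, not part of the original file) of
 * calling the exported ftrace_dump() from other kernel code;
 * my_driver_fatal_error() is a hypothetical helper. Guarded with #if 0
 * so it is never built:
 */
#if 0	/* example only */
#include <linux/kernel.h>	/* ftrace_dump(), enum ftrace_dump_mode */

static void my_driver_fatal_error(void)
{
	/* Spill every CPU's ftrace ring buffer to the console. */
	ftrace_dump(DUMP_ALL);
}
#endif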

__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* Used for event triggers */
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_free_cpumask;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_temp_buffer;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	trace_init_cmdlines();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warning("Trace clock %s not defined, going back to default\n",
				   trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	while (trace_boot_options) {
		char *option;

		option = strsep(&trace_boot_options, ",");
		trace_set_options(&global_trace, option);
	}

	register_snapshot_cmd();

	return 0;

out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_free_cpumask:
	free_percpu(global_trace.trace_buffer.data);
#ifdef CONFIG_TRACER_MAX_TRACE
	free_percpu(global_trace.max_buffer.data);
#endif
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
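
/*
 * Illustrative boot-time knobs consumed along this path (assuming the
 * usual __setup() parsers earlier in this file), e.g.:
 *
 *	trace_buf_size=1024k	# per-cpu size used once the buffer expands
 *	trace_clock=global	# picked up via trace_boot_clock above
 *	trace_options=sym-offset	# split from trace_boot_options above
 */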

__init static int clear_boot_tracer(void)
{
	/*
	 * The default bootup tracer name lives in an __init section and
	 * will be freed after boot. This function runs as a late initcall;
	 * if the boot tracer was never registered, clear the pointer so
	 * later registrations do not compare against memory that is about
	 * to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);