blob: 15209335888db6c6087fcf4178e36a9aed278499 [file] [log] [blame]
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001/*
2 * ring buffer based function tracer
3 *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
Nadia Yvette Chambers6d49e352012-12-06 10:39:54 +010012 * Copyright (C) 2004 Nadia Yvette Chambers
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020013 */
Steven Rostedt2cadf912008-12-01 22:20:19 -050014#include <linux/ring_buffer.h>
Sam Ravnborg273b2812009-10-18 00:52:28 +020015#include <generated/utsrelease.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050016#include <linux/stacktrace.h>
17#include <linux/writeback.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020018#include <linux/kallsyms.h>
19#include <linux/seq_file.h>
Steven Rostedt3f5a54e2008-07-30 22:36:46 -040020#include <linux/notifier.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050021#include <linux/irqflags.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020022#include <linux/debugfs.h>
Steven Rostedt4c11d7a2008-05-12 21:20:43 +020023#include <linux/pagemap.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020024#include <linux/hardirq.h>
25#include <linux/linkage.h>
26#include <linux/uaccess.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050027#include <linux/kprobes.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020028#include <linux/ftrace.h>
29#include <linux/module.h>
30#include <linux/percpu.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050031#include <linux/splice.h>
Steven Rostedt3f5a54e2008-07-30 22:36:46 -040032#include <linux/kdebug.h>
Frederic Weisbecker5f0c6c02009-03-27 14:22:10 +010033#include <linux/string.h>
Lai Jiangshan7e53bd42010-01-06 20:08:50 +080034#include <linux/rwsem.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090035#include <linux/slab.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020036#include <linux/ctype.h>
37#include <linux/init.h>
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +020038#include <linux/poll.h>
Steven Rostedtb892e5c2012-03-01 22:06:48 -050039#include <linux/nmi.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020040#include <linux/fs.h>
Clark Williams8bd75c72013-02-07 09:47:07 -060041#include <linux/sched/rt.h>
Ingo Molnar86387f72008-05-12 21:20:51 +020042
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020043#include "trace.h"
Steven Rostedtf0868d12008-12-23 23:24:12 -050044#include "trace_output.h"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020045
/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
/* Set to true once the ring buffer has been sized up from its boot minimum */
bool ring_buffer_expanded;
Steven Rostedt73c51622009-03-11 13:42:01 -040051
/*
 * We need to change this state when a selftest is running.
 * A selftest will lurk into the ring-buffer to count the
 * entries inserted during the selftest although some concurrent
 * insertions into the ring-buffer such as trace_printk could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;
Frederic Weisbeckerff325042008-12-04 23:47:35 +010060
/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;
Steven Rostedtb2821ae2009-02-02 21:38:32 -050065
/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }	/* empty, NULL-terminated option list */
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};
75
/*
 * dummy_set_flag - no-op flag setter paired with dummy_tracer_flags
 *
 * Used for tracers that do not supply their own set_flag() callback;
 * accepts any flag change and reports success.
 */
static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}
Steven Rostedt0f048702008-11-05 16:05:44 -050081
/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);
88
/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;
Steven Rostedt0f048702008-11-05 16:05:44 -050096
/* Per-cpu counter; presumably nonzero disables recording on that cpu — see callers */
DEFINE_PER_CPU(int, ftrace_cpu_disabled);

/* Mask of cpus that participate in tracing */
cpumask_var_t __read_mostly tracing_buffer_mask;
Steven Rostedtab464282008-05-12 21:21:00 +0200100
/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting it to a
 * serial console.
 *
 * It is default off, but you can enable it with either specifying
 * "ftrace_dump_on_oops" in the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;
Steven Rostedt944ac422008-10-23 19:26:08 -0400118
/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;
121
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -0500122static int tracing_set_tracer(struct trace_array *tr, const char *buf);
Steven Rostedtb2821ae2009-02-02 21:38:32 -0500123
/* Maximum length accepted for a tracer name on the command line */
#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

/* Set by the "alloc_snapshot" boot parameter to allocate the snapshot buffer */
static bool allocate_snapshot;
129
/*
 * set_cmdline_ftrace - handle the "ftrace=" boot parameter
 *
 * Saves the requested tracer name so it can be enabled once tracing
 * is initialized.
 */
static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100139
/*
 * set_ftrace_dump_on_oops - handle the "ftrace_dump_on_oops" boot parameter
 *
 * A bare "ftrace_dump_on_oops" dumps all CPU buffers on an oops;
 * "ftrace_dump_on_oops=orig_cpu" dumps only the buffer of the CPU
 * that triggered the oops.  Returns 1 when handled, 0 otherwise.
 */
static int __init set_ftrace_dump_on_oops(char *str)
{
	/* No '=' (or nothing after it) means dump all CPUs */
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
Steven Rostedt60a11772008-05-12 21:20:44 +0200155
/*
 * stop_trace_on_warning - handle the "traceoff_on_warning" boot parameter
 *
 * The argument (if any) is ignored; the presence of the parameter
 * arms tracing shutdown on the first WARN*().
 */
static int __init stop_trace_on_warning(char *str)
{
	__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning=", stop_trace_on_warning);
162
/*
 * boot_alloc_snapshot - handle the "alloc_snapshot" boot parameter
 *
 * Requests allocation of the snapshot (max) buffer at boot, which in
 * turn requires the main ring buffer at its full size.
 */
static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500171
Steven Rostedt7bcfaf52012-11-01 22:56:07 -0400172
173static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
174static char *trace_boot_options __initdata;
175
176static int __init set_trace_boot_options(char *str)
177{
Chen Gang67012ab2013-04-08 12:06:44 +0800178 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
Steven Rostedt7bcfaf52012-11-01 22:56:07 -0400179 trace_boot_options = trace_boot_options_buf;
180 return 0;
181}
182__setup("trace_options=", set_trace_boot_options);
183
Steven Rostedte1e232c2014-02-10 23:38:46 -0500184static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
185static char *trace_boot_clock __initdata;
186
187static int __init set_trace_boot_clock(char *str)
188{
189 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
190 trace_boot_clock = trace_boot_clock_buf;
191 return 0;
192}
193__setup("trace_clock=", set_trace_boot_clock);
194
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400195
/*
 * ns2usecs - convert nanoseconds to microseconds, rounded to nearest
 *
 * do_div() is used because native 64-bit division is not available
 * on all 32-bit architectures.
 */
unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;		/* round to the nearest microsecond */
	do_div(nsec, 1000);
	return nsec;
}
202
/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a link list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the link list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array global_trace;

/* List of all trace arrays (global_trace plus created instances) */
LIST_HEAD(ftrace_trace_arrays);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200218
/*
 * trace_array_get - take a reference on a trace array
 * @this_tr: the trace array to reference
 *
 * Looks up @this_tr on the global list of trace arrays under
 * trace_types_lock and increments its reference count if found.
 *
 * Returns 0 on success, -ENODEV if @this_tr is no longer on the list.
 */
int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}
236
/*
 * __trace_array_put - drop a reference on a trace array
 *
 * Caller must hold trace_types_lock.
 */
static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);	/* catch an unbalanced put */
	this_tr->ref--;
}

/* Drop a reference previously taken with trace_array_get() */
void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}
249
/*
 * filter_check_discard - discard a reserved event that fails the file filter
 * @file:   event file whose filter (if enabled) is evaluated
 * @rec:    the recorded event data the predicates run against
 * @buffer: ring buffer the event was reserved in
 * @event:  the reserved, not-yet-committed ring buffer event
 *
 * Returns 1 if the event was discarded, 0 if it should be committed.
 */
int filter_check_discard(struct ftrace_event_file *file, void *rec,
			 struct ring_buffer *buffer,
			 struct ring_buffer_event *event)
{
	if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(file->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(filter_check_discard);
263
/*
 * call_filter_check_discard - discard a reserved event that fails the
 * event call's filter
 * @call:   event call whose filter (if enabled) is evaluated
 * @rec:    the recorded event data the predicates run against
 * @buffer: ring buffer the event was reserved in
 * @event:  the reserved, not-yet-committed ring buffer event
 *
 * Same as filter_check_discard(), but keyed off the ftrace_event_call.
 * Returns 1 if the event was discarded, 0 if it should be committed.
 */
int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(call_filter_check_discard);
Tom Zanussieb02ce02009-04-08 03:15:54 -0500277
/*
 * buffer_ftrace_now - read the normalized time stamp of @buf for @cpu
 *
 * Falls back to trace_clock_local() during early boot, before the
 * ring buffer has been allocated.
 */
static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}
291
/* Current time stamp of the global trace buffer for @cpu */
cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}
296
/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}
316
/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

/* Per-cpu buffer size; overridable via the "trace_buf_size=" boot parameter */
static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a link list of available tracers. */
static struct tracer *trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200338
/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which returns by ring_buffer_peek() ..etc)
 * are not protected by ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 * A) the page of the consumed events may become a normal page
 *    (not reader page) in ring buffer, and this page will be rewritten
 *    by the events producer.
 * B) The page of the consumed events may become a page for splice_read,
 *    and this page will be returned to system.
 *
 * These primitives allow multi process access to different cpu ring buffers
 * concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */
360
#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

/*
 * trace_access_lock - serialize access to a cpu's ring buffer
 * @cpu: cpu buffer to access, or RING_BUFFER_ALL_CPUS for all of them
 *
 * Single-cpu readers take the rwsem for read plus that cpu's mutex,
 * so different cpus can be read in parallel; RING_BUFFER_ALL_CPUS
 * takes the rwsem for write and excludes everyone else.
 */
static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

/* Release the locks taken by trace_access_lock(), in reverse order */
static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

/* Initialize the per-cpu buffer access mutexes */
static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

/* On UP, a single mutex serializes all ring buffer access */
static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
420
/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;
Vaibhav Nagarnaike7e2ee82011-05-10 13:27:21 -0700426
/* Enable recording into @tr's ring buffer and mirror the state flag */
static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}
443
/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);
455
456/**
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500457 * __trace_puts - write a constant string into the trace buffer.
458 * @ip: The address of the caller
459 * @str: The constant string to write
460 * @size: The size of the string.
461 */
462int __trace_puts(unsigned long ip, const char *str, int size)
463{
464 struct ring_buffer_event *event;
465 struct ring_buffer *buffer;
466 struct print_entry *entry;
467 unsigned long irq_flags;
468 int alloc;
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800469 int pc;
470
zhangwei(Jovi)f0160a52013-07-18 16:31:18 +0800471 if (!(trace_flags & TRACE_ITER_PRINTK))
472 return 0;
473
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800474 pc = preempt_count();
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500475
Steven Rostedt (Red Hat)3132e102014-01-23 12:27:59 -0500476 if (unlikely(tracing_selftest_running || tracing_disabled))
477 return 0;
478
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500479 alloc = sizeof(*entry) + size + 2; /* possible \n added */
480
481 local_save_flags(irq_flags);
482 buffer = global_trace.trace_buffer.buffer;
483 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800484 irq_flags, pc);
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500485 if (!event)
486 return 0;
487
488 entry = ring_buffer_event_data(event);
489 entry->ip = ip;
490
491 memcpy(&entry->buf, str, size);
492
493 /* Add a newline if necessary */
494 if (entry->buf[size - 1] != '\n') {
495 entry->buf[size] = '\n';
496 entry->buf[size + 1] = '\0';
497 } else
498 entry->buf[size] = '\0';
499
500 __buffer_unlock_commit(buffer, event);
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800501 ftrace_trace_stack(buffer, irq_flags, 4, pc);
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500502
503 return size;
504}
505EXPORT_SYMBOL_GPL(__trace_puts);
506
/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip: The address of the caller
 * @str: The constant string to write to the buffer to
 *
 * Only the pointer is recorded (the string must live forever, e.g. a
 * string literal); returns 1 on success, 0 if nothing was written.
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(buffer, irq_flags, 4, pc);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);
546
#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * trace_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	/* Swapping buffers from NMI context is not supported */
	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500593
/* Forward declarations; defined later in this file */
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

/*
 * alloc_snapshot - allocate the snapshot (max) buffer for @tr
 *
 * Sizes the spare buffer to match the live trace buffer.  Returns 0
 * on success (or if already allocated), negative errno on failure.
 */
static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}
615
/*
 * free_snapshot - release the memory held by @tr's snapshot buffer
 */
static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer. instead, resize it because
	 * The max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500628
/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);	/* allocation failure is unexpected; warn loudly */

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
650
/**
 * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to trace_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;		/* allocation failed; nothing to snapshot into */

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
/* Stubs used when CONFIG_TRACER_SNAPSHOT is not set: warn and do nothing */
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */
692
/* Disable recording into @tr's ring buffer and mirror the state flag */
static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}
709
/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);
723
/* Called from the WARN path: stop tracing if "traceoff_on_warning" was set */
void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}
729
/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 * Falls back to the mirror flag when the buffer is not yet allocated.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}
742
/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);
751
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400752static int __init set_buf_size(char *str)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200753{
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400754 unsigned long buf_size;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +0200755
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200756 if (!str)
757 return 0;
Li Zefan9d612be2009-06-24 17:33:15 +0800758 buf_size = memparse(str, &str);
Steven Rostedtc6caeeb2008-05-12 21:21:00 +0200759 /* nr_entries can not be zero */
Li Zefan9d612be2009-06-24 17:33:15 +0800760 if (buf_size == 0)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +0200761 return 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400762 trace_buf_size = buf_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200763 return 1;
764}
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400765__setup("trace_buf_size=", set_buf_size);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200766
Tim Bird0e950172010-02-25 15:36:43 -0800767static int __init set_tracing_thresh(char *str)
768{
Wang Tianhong87abb3b2012-08-02 14:02:00 +0800769 unsigned long threshold;
Tim Bird0e950172010-02-25 15:36:43 -0800770 int ret;
771
772 if (!str)
773 return 0;
Daniel Walterbcd83ea2012-09-26 22:08:38 +0200774 ret = kstrtoul(str, 0, &threshold);
Tim Bird0e950172010-02-25 15:36:43 -0800775 if (ret < 0)
776 return 0;
Wang Tianhong87abb3b2012-08-02 14:02:00 +0800777 tracing_thresh = threshold * 1000;
Tim Bird0e950172010-02-25 15:36:43 -0800778 return 1;
779}
780__setup("tracing_thresh=", set_tracing_thresh);
781
/*
 * nsecs_to_usecs - convert a nanosecond count to microseconds
 * @nsecs: value in nanoseconds
 *
 * Plain integer division; any sub-microsecond remainder is dropped.
 */
unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}
786
/*
 * Option name strings for the "trace_options" interface.  These must
 * match the bit positions in trace_iterator_flags: entry N names bit
 * (1 << N), so never reorder or insert in the middle -- only append
 * new options just before the terminating NULL.
 */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	"record-cmd",
	"overwrite",
	"disable_on_free",
	"irq-info",
	"markers",
	"function-trace",
	NULL		/* sentinel for iteration */
};
817
/*
 * Table of selectable trace clocks.
 * NOTE(review): this table appears to be indexed/searched by the
 * clock-selection code elsewhere in this file, so the entry order is
 * presumably significant -- confirm against the trace_clock setter
 * before reordering.  ARCH_TRACE_CLOCKS lets architectures append
 * their own entries.
 */
static struct {
	u64 (*func)(void);	/* returns the current clock reading */
	const char *name;	/* name shown in the trace_clock file */
	int in_ns; /* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,	"local",	1 },
	{ trace_clock_global,	"global",	1 },
	{ trace_clock_counter,	"counter",	0 },
	{ trace_clock_jiffies,	"uptime",	0 },
	{ trace_clock,		"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",	1 },
	ARCH_TRACE_CLOCKS
};
831
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +0200832/*
833 * trace_parser_get_init - gets the buffer for trace parser
834 */
835int trace_parser_get_init(struct trace_parser *parser, int size)
836{
837 memset(parser, 0, sizeof(*parser));
838
839 parser->buffer = kmalloc(size, GFP_KERNEL);
840 if (!parser->buffer)
841 return 1;
842
843 parser->size = size;
844 return 0;
845}
846
847/*
848 * trace_parser_put - frees the buffer for trace parser
849 */
850void trace_parser_put(struct trace_parser *parser)
851{
852 kfree(parser->buffer);
853}
854
/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read, or -EINVAL when a token does not
 * fit in the parser buffer, or the get_user() fault code.
 *
 * A token that ends exactly at the write boundary (no trailing space
 * seen yet) is left unterminated with parser->cont set, so the next
 * call resumes it instead of skipping leading whitespace.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	/* A rewound file position starts a brand-new parse. */
	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		/* Reserve one byte for the NUL terminator below. */
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		/* Input ran out mid-token: stash the char and resume later. */
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
940
Dmitri Vorobievb8b94262009-03-22 19:11:11 +0200941static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +0200942{
943 int len;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +0200944
945 if (s->len <= s->readpos)
946 return -EBUSY;
947
948 len = s->len - s->readpos;
949 if (cnt > len)
950 cnt = len;
Dan Carpenter5a26c8f2012-04-20 09:31:45 +0300951 memcpy(buf, s->buffer + s->readpos, cnt);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +0200952
Steven Rostedte74da522009-03-04 20:31:11 -0500953 s->readpos += cnt;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +0200954 return cnt;
955}
956
Tim Bird0e950172010-02-25 15:36:43 -0800957unsigned long __read_mostly tracing_thresh;
958
Steven Rostedt5d4a9db2009-08-27 16:52:21 -0400959#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt5d4a9db2009-08-27 16:52:21 -0400960/*
961 * Copy the new maximum trace into the separate maximum-trace
962 * structure. (this way the maximum trace is permanently saved,
963 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
964 */
965static void
966__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
967{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -0500968 struct trace_buffer *trace_buf = &tr->trace_buffer;
969 struct trace_buffer *max_buf = &tr->max_buffer;
970 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
971 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
Steven Rostedt5d4a9db2009-08-27 16:52:21 -0400972
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -0500973 max_buf->cpu = cpu;
974 max_buf->time_start = data->preempt_timestamp;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -0400975
Steven Rostedt (Red Hat)6d9b3fa2014-01-14 11:28:38 -0500976 max_data->saved_latency = tr->max_latency;
Steven Rostedt8248ac02009-09-02 12:27:41 -0400977 max_data->critical_start = data->critical_start;
978 max_data->critical_end = data->critical_end;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -0400979
Arnaldo Carvalho de Melo1acaa1b2010-03-05 18:23:50 -0300980 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
Steven Rostedt8248ac02009-09-02 12:27:41 -0400981 max_data->pid = tsk->pid;
Steven Rostedt (Red Hat)f17a5192013-05-30 21:10:37 -0400982 /*
983 * If tsk == current, then use current_uid(), as that does not use
984 * RCU. The irq tracer can be called out of RCU scope.
985 */
986 if (tsk == current)
987 max_data->uid = current_uid();
988 else
989 max_data->uid = task_uid(tsk);
990
Steven Rostedt8248ac02009-09-02 12:27:41 -0400991 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
992 max_data->policy = tsk->policy;
993 max_data->rt_priority = tsk->rt_priority;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -0400994
995 /* record this tasks comm */
996 tracing_record_cmdline(tsk);
997}
998
/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 *
 * Must be called with interrupts disabled (WARNed on below).
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	/* Tracing has been administratively stopped; nothing to save. */
	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	/* Serializes the swap against other buffer-flipping paths. */
	arch_spin_lock(&tr->max_lock);

	/* Swap the live buffer with the max buffer (pointer exchange). */
	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
1033
/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 *
 * Must be called with interrupts disabled (WARNed on below).
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	/* Tracing has been administratively stopped; nothing to save. */
	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	/* Exchange just @cpu's buffer between the live and max arrays. */
	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	/* -EAGAIN and -EBUSY are expected race outcomes; anything else is a bug. */
	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001077#endif /* CONFIG_TRACER_MAX_TRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001078
Rabin Vincente30f53a2014-11-10 19:46:34 +01001079static int wait_on_pipe(struct trace_iterator *iter, bool full)
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001080{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05001081 /* Iterators are static, they should be filled or empty */
1082 if (trace_buffer_iter(iter, iter->cpu_file))
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04001083 return 0;
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001084
Rabin Vincente30f53a2014-11-10 19:46:34 +01001085 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1086 full);
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001087}
1088
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001089#ifdef CONFIG_FTRACE_STARTUP_TEST
/*
 * Run @type's startup selftest against the global trace array.
 * Returns 0 on pass (or when no selftest applies), -1 on failure.
 * The previous current_trace is restored in either case.
 */
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001148#else
/* Selftests compiled out (CONFIG_FTRACE_STARTUP_TEST=n): always succeed. */
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
1153#endif /* CONFIG_FTRACE_STARTUP_TEST */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001154
/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.  Validates the name, rejects
 * duplicates, fills in default flag callbacks, runs the startup
 * selftest, and links the tracer into the global trace_types list.
 * Returns 0 on success, negative/-1 on failure.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	/* Set under the lock so concurrent registration sees it. */
	tracing_selftest_running = true;

	/* Reject a tracer whose name is already registered. */
	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	/* Fill in no-op defaults so callers never see NULL hooks. */
	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	/* Selftest passed: push onto the head of the tracer list. */
	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}
1229
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001230void tracing_reset(struct trace_buffer *buf, int cpu)
Steven Rostedtf6339032009-09-04 12:35:16 -04001231{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001232 struct ring_buffer *buffer = buf->buffer;
Steven Rostedtf6339032009-09-04 12:35:16 -04001233
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001234 if (!buffer)
1235 return;
1236
Steven Rostedtf6339032009-09-04 12:35:16 -04001237 ring_buffer_record_disable(buffer);
1238
1239 /* Make sure all commits have finished */
1240 synchronize_sched();
Steven Rostedt68179682012-05-08 20:57:53 -04001241 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedtf6339032009-09-04 12:35:16 -04001242
1243 ring_buffer_record_enable(buffer);
1244}
1245
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001246void tracing_reset_online_cpus(struct trace_buffer *buf)
Pekka J Enberg213cc062008-12-19 12:08:39 +02001247{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001248 struct ring_buffer *buffer = buf->buffer;
Pekka J Enberg213cc062008-12-19 12:08:39 +02001249 int cpu;
1250
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001251 if (!buffer)
1252 return;
1253
Steven Rostedt621968c2009-09-04 12:02:35 -04001254 ring_buffer_record_disable(buffer);
1255
1256 /* Make sure all commits have finished */
1257 synchronize_sched();
1258
Alexander Z Lam94571582013-08-02 18:36:16 -07001259 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001260
1261 for_each_online_cpu(cpu)
Steven Rostedt68179682012-05-08 20:57:53 -04001262 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedt621968c2009-09-04 12:02:35 -04001263
1264 ring_buffer_record_enable(buffer);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001265}
1266
/*
 * Reset the buffers of every registered trace array (and their max
 * buffers when snapshots are configured).
 * Must have trace_types_lock held.
 */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}
1279
/* Default number of saved-cmdline slots allocated at boot. */
#define SAVED_CMDLINES_DEFAULT 128
/* Sentinel marking an unused entry in either direction of the maps. */
#define NO_CMDLINE_MAP UINT_MAX
/* Protects savedcmd; the record path only trylocks it (see trace_save_cmdline). */
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
/*
 * Storage mapping pids to the comm (task name) last recorded for them,
 * so trace output can print names without referencing the tasks.
 */
struct saved_cmdlines_buffer {
	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];	/* pid -> slot index */
	unsigned *map_cmdline_to_pid;	/* slot index -> owning pid */
	unsigned cmdline_num;		/* number of comm slots */
	int cmdline_idx;		/* most recently used slot */
	char *saved_cmdlines;		/* cmdline_num slots of TASK_COMM_LEN bytes */
};
static struct saved_cmdlines_buffer *savedcmd;

/* temporary disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001294
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001295static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001296{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001297 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1298}
1299
1300static inline void set_cmdline(int idx, const char *cmdline)
1301{
1302 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1303}
1304
1305static int allocate_cmdlines_buffer(unsigned int val,
1306 struct saved_cmdlines_buffer *s)
1307{
1308 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1309 GFP_KERNEL);
1310 if (!s->map_cmdline_to_pid)
1311 return -ENOMEM;
1312
1313 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1314 if (!s->saved_cmdlines) {
1315 kfree(s->map_cmdline_to_pid);
1316 return -ENOMEM;
1317 }
1318
1319 s->cmdline_idx = 0;
1320 s->cmdline_num = val;
1321 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1322 sizeof(s->map_pid_to_cmdline));
1323 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1324 val * sizeof(*s->map_cmdline_to_pid));
1325
1326 return 0;
1327}
1328
1329static int trace_create_savedcmd(void)
1330{
1331 int ret;
1332
Namhyung Kima6af8fb2014-06-10 16:11:35 +09001333 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001334 if (!savedcmd)
1335 return -ENOMEM;
1336
1337 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1338 if (ret < 0) {
1339 kfree(savedcmd);
1340 savedcmd = NULL;
1341 return -ENOMEM;
1342 }
1343
1344 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001345}
1346
Carsten Emdeb5130b12009-09-13 01:43:07 +02001347int is_tracing_stopped(void)
1348{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001349 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02001350}
1351
/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 *
 * Stops and starts nest: recording resumes only when the stop
 * count drops back to zero.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		/* Going negative means an unbalanced start/stop pair. */
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}
1394
1395static void tracing_start_tr(struct trace_array *tr)
1396{
1397 struct ring_buffer *buffer;
1398 unsigned long flags;
1399
1400 if (tracing_disabled)
1401 return;
1402
1403 /* If global, we need to also start the max tracer */
1404 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1405 return tracing_start();
1406
1407 raw_spin_lock_irqsave(&tr->start_lock, flags);
1408
1409 if (--tr->stop_count) {
1410 if (tr->stop_count < 0) {
1411 /* Someone screwed up their debugging */
1412 WARN_ON_ONCE(1);
1413 tr->stop_count = 0;
1414 }
1415 goto out;
1416 }
1417
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001418 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001419 if (buffer)
1420 ring_buffer_record_enable(buffer);
1421
1422 out:
1423 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001424}
1425
/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 *
 * Stops nest: only the first stop (count 0 -> 1) actually disables
 * the ring buffers.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (global_trace.stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}
1459
1460static void tracing_stop_tr(struct trace_array *tr)
1461{
1462 struct ring_buffer *buffer;
1463 unsigned long flags;
1464
1465 /* If global, we need to also stop the max tracer */
1466 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1467 return tracing_stop();
1468
1469 raw_spin_lock_irqsave(&tr->start_lock, flags);
1470 if (tr->stop_count++)
1471 goto out;
1472
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001473 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001474 if (buffer)
1475 ring_buffer_record_disable(buffer);
1476
1477 out:
1478 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001479}
1480
Ingo Molnare309b412008-05-12 21:20:51 +02001481void trace_stop_cmdline_recording(void);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001482
/*
 * trace_save_cmdline - save a task's comm into the saved cmdline map
 * @tsk: the task whose pid/comm pair should be recorded
 *
 * Returns 1 if the comm was saved, 0 if it could not be (pid out of
 * the mapped range, or the cmdline lock was contended).
 */
static int trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	/* Pid 0 (idle) and pids above PID_MAX_DEFAULT are never mapped */
	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return 0;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!arch_spin_trylock(&trace_cmdline_lock))
		return 0;

	idx = savedcmd->map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		/* No slot for this pid yet: take the next slot, round robin */
		idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = savedcmd->map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
		savedcmd->map_pid_to_cmdline[tsk->pid] = idx;

		savedcmd->cmdline_idx = idx;
	}

	set_cmdline(idx, tsk->comm);

	arch_spin_unlock(&trace_cmdline_lock);

	return 1;
}
1525
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001526static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001527{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001528 unsigned map;
1529
Steven Rostedt4ca53082009-03-16 19:20:15 -04001530 if (!pid) {
1531 strcpy(comm, "<idle>");
1532 return;
1533 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001534
Steven Rostedt74bf4072010-01-25 15:11:53 -05001535 if (WARN_ON_ONCE(pid < 0)) {
1536 strcpy(comm, "<XXX>");
1537 return;
1538 }
1539
Steven Rostedt4ca53082009-03-16 19:20:15 -04001540 if (pid > PID_MAX_DEFAULT) {
1541 strcpy(comm, "<...>");
1542 return;
1543 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001544
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001545 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001546 if (map != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001547 strcpy(comm, get_saved_cmdlines(map));
Thomas Gleixner50d88752009-03-18 08:58:44 +01001548 else
1549 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001550}
1551
/*
 * trace_find_cmdline - look up the saved comm for a pid
 * @pid:  pid to resolve
 * @comm: buffer the comm string is copied into
 *
 * Takes the cmdline lock with preemption disabled so the lookup
 * cannot race with trace_save_cmdline() updating the map.
 */
void trace_find_cmdline(int pid, char comm[])
{
	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	__trace_find_cmdline(pid, comm);

	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}
1562
Ingo Molnare309b412008-05-12 21:20:51 +02001563void tracing_record_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001564{
Steven Rostedt0fb96562012-05-11 14:25:30 -04001565 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001566 return;
1567
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001568 if (!__this_cpu_read(trace_cmdline_save))
1569 return;
1570
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001571 if (trace_save_cmdline(tsk))
1572 __this_cpu_write(trace_cmdline_save, false);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001573}
1574
/*
 * tracing_generic_entry_update - fill in the common trace entry header
 * @entry: entry to initialize
 * @flags: saved irq flags at the time of the event
 * @pc:    preempt count at the time of the event
 *
 * Records the current pid, the low byte of the preempt count, and a
 * bitmask describing irq/softirq/need-resched state into the header
 * shared by all trace entry types.
 */
void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count = pc & 0xff;
	entry->pid = (tsk) ? tsk->pid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001595
Steven Rostedte77405a2009-09-02 14:17:06 -04001596struct ring_buffer_event *
1597trace_buffer_lock_reserve(struct ring_buffer *buffer,
1598 int type,
1599 unsigned long len,
1600 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001601{
1602 struct ring_buffer_event *event;
1603
Steven Rostedte77405a2009-09-02 14:17:06 -04001604 event = ring_buffer_lock_reserve(buffer, len);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001605 if (event != NULL) {
1606 struct trace_entry *ent = ring_buffer_event_data(event);
1607
1608 tracing_generic_entry_update(ent, flags, pc);
1609 ent->type = type;
1610 }
1611
1612 return event;
1613}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001614
/*
 * __buffer_unlock_commit - commit a reserved event to the ring buffer
 *
 * Also sets the per-cpu trace_cmdline_save flag so the next
 * tracing_record_cmdline() call re-records the current task's comm.
 */
void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_cmdline_save, true);
	ring_buffer_unlock_commit(buffer, event);
}
1621
/*
 * __trace_buffer_unlock_commit - commit an event plus stack traces
 *
 * After committing, appends a kernel stack trace and a user stack
 * trace; both are no-ops unless enabled via trace_flags (checked
 * inside the helpers).
 */
static inline void
__trace_buffer_unlock_commit(struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
			     unsigned long flags, int pc)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack(buffer, flags, 6, pc);
	ftrace_trace_userstack(buffer, flags, pc);
}
1632
/* Public wrapper: commit an event plus optional kernel/user stack traces */
void trace_buffer_unlock_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001640
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001641static struct ring_buffer *temp_buffer;
1642
/*
 * trace_event_buffer_lock_reserve - reserve an event for an event file
 * @current_rb:  out parameter, set to the buffer the event was reserved in
 * @ftrace_file: the event file the event belongs to
 * @type:        trace entry type
 * @len:         length to reserve
 * @flags:       saved irq flags for the entry header
 * @pc:          preempt count for the entry header
 *
 * Normally reserves in the file's trace array buffer; falls back to
 * temp_buffer when recording is off but the event has conditional
 * triggers that still need to inspect the event data.
 */
struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
			  struct ftrace_event_file *ftrace_file,
			  int type, unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *entry;

	*current_rb = ftrace_file->tr->trace_buffer.buffer;
	entry = trace_buffer_lock_reserve(*current_rb,
					  type, len, flags, pc);
	/*
	 * If tracing is off, but we have triggers enabled
	 * we still need to look at the event data. Use the temp_buffer
	 * to store the trace event for the trigger to use. It's recursion
	 * safe and will not be recorded anywhere.
	 */
	if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
		*current_rb = temp_buffer;
		entry = trace_buffer_lock_reserve(*current_rb,
						  type, len, flags, pc);
	}
	return entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1668
/*
 * trace_current_buffer_lock_reserve - reserve in the global trace buffer
 * @current_rb: out parameter, set to the global trace buffer
 *
 * Remaining arguments are as for trace_buffer_lock_reserve().
 */
struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
				  int type, unsigned long len,
				  unsigned long flags, int pc)
{
	*current_rb = global_trace.trace_buffer.buffer;
	return trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001679
/* Commit an event plus optional stack traces (see __trace_buffer_unlock_commit) */
void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
					struct ring_buffer_event *event,
					unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001687
/*
 * trace_buffer_unlock_commit_regs - commit an event, tracing stacks from @regs
 *
 * Like trace_buffer_unlock_commit(), but the kernel stack trace is
 * taken from the supplied pt_regs instead of the current stack.
 */
void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001699
/* Discard a reserved event instead of recording it */
void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event)
{
	ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001706
/*
 * trace_function - record a function-entry event into the trace buffer
 * @tr:        trace array whose buffer receives the event
 * @ip:        address of the traced function
 * @parent_ip: address of the caller
 * @flags:     saved irq flags at the time of the call
 * @pc:        preempt count at the time of the call
 */
void
trace_function(struct trace_array *tr,
 unsigned long ip, unsigned long parent_ip, unsigned long flags,
 int pc)
{
	struct ftrace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	/* If we are reading the ring buffer, don't trace */
	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	/* Commit only if no event filter discards the entry */
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}
1732
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001733#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001734
/* Number of stack entries that fit in one page */
#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
/* Per-cpu scratch area for building a stack trace outside the ring buffer */
struct ftrace_stack {
	unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
};

static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
/* Per-cpu nesting depth guarding use of ftrace_stack (see __ftrace_trace_stack) */
static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1742
/*
 * __ftrace_trace_stack - record a kernel stack trace into the ring buffer
 * @buffer: ring buffer to record into
 * @flags:  saved irq flags for the entry header
 * @skip:   number of frames to skip off the top of the trace
 * @pc:     preempt count for the entry header
 * @regs:   if non-NULL, walk the stack from these saved registers
 */
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs)
{
	struct ftrace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	int use_stack;
	int size = FTRACE_STACK_ENTRIES;

	trace.nr_entries = 0;
	trace.skip = skip;

	/*
	 * Since events can happen in NMIs there's no safe way to
	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
	 * or NMI comes in, it will just have to use the default
	 * FTRACE_STACK_SIZE.
	 */
	preempt_disable_notrace();

	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
	/*
	 * We don't need any atomic variables, just a barrier.
	 * If an interrupt comes in, we don't care, because it would
	 * have exited and put the counter back to what we want.
	 * We just need a barrier to keep gcc from moving things
	 * around.
	 */
	barrier();
	if (use_stack == 1) {
		/* First user on this cpu: safe to use the large per-cpu area */
		trace.entries = this_cpu_ptr(ftrace_stack.calls);
		trace.max_entries = FTRACE_STACK_MAX_ENTRIES;

		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);

		if (trace.nr_entries > size)
			size = trace.nr_entries;
	} else
		/* From now on, use_stack is a boolean */
		use_stack = 0;

	/* Reserve room for the frames actually captured (or the default) */
	size *= sizeof(unsigned long);

	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
					  sizeof(*entry) + size, flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);

	memset(&entry->caller, 0, size);

	if (use_stack)
		memcpy(&entry->caller, trace.entries,
		       trace.nr_entries * sizeof(unsigned long));
	else {
		/* Nested use: capture directly into the ring buffer entry */
		trace.max_entries = FTRACE_STACK_ENTRIES;
		trace.entries = entry->caller;
		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);
	}

	entry->size = trace.nr_entries;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out:
	/* Again, don't let gcc optimize things here */
	barrier();
	__this_cpu_dec(ftrace_stack_reserve);
	preempt_enable_notrace();

}
1823
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001824void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1825 int skip, int pc, struct pt_regs *regs)
1826{
1827 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1828 return;
1829
1830 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1831}
1832
Steven Rostedte77405a2009-09-02 14:17:06 -04001833void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1834 int skip, int pc)
Steven Rostedt53614992009-01-15 19:12:40 -05001835{
1836 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1837 return;
1838
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001839 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
Steven Rostedt53614992009-01-15 19:12:40 -05001840}
1841
/* Record a kernel stack trace into @tr's buffer, unconditionally */
void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc)
{
	__ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
}
1847
Steven Rostedt03889382009-12-11 09:48:22 -05001848/**
1849 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001850 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05001851 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001852void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05001853{
1854 unsigned long flags;
1855
1856 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05001857 return;
Steven Rostedt03889382009-12-11 09:48:22 -05001858
1859 local_save_flags(flags);
1860
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001861 /*
1862 * Skip 3 more, seems to get us at the caller of
1863 * this function.
1864 */
1865 skip += 3;
1866 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1867 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05001868}
1869
Steven Rostedt91e86e52010-11-10 12:56:12 +01001870static DEFINE_PER_CPU(int, user_stack_count);
1871
/*
 * ftrace_trace_userstack - record the current user-space stack trace
 * @buffer: ring buffer to record into
 * @flags:  saved irq flags for the entry header
 * @pc:     preempt count for the entry header
 *
 * No-op unless the userstacktrace option is set. Bails out in NMI
 * context (the user stack walk can fault) and guards against
 * recursion, since the walk itself may trigger trace events.
 */
void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;

	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	/*
	 * NMIs can not handle page faults, even with fix ups.
	 * The save user stack can (and often does) fault.
	 */
	if (unlikely(in_nmi()))
		return;

	/*
	 * prevent recursion, since the user stack tracing may
	 * trigger other kernel events.
	 */
	preempt_disable();
	if (__this_cpu_read(user_stack_count))
		goto out;

	__this_cpu_inc(user_stack_count);

	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					  sizeof(*entry), flags, pc);
	if (!event)
		goto out_drop_count;
	entry = ring_buffer_event_data(event);

	entry->tgid = current->tgid;
	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries = 0;
	trace.max_entries = FTRACE_STACK_ENTRIES;
	trace.skip = 0;
	trace.entries = entry->caller;

	save_stack_trace_user(&trace);
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out_drop_count:
	__this_cpu_dec(user_stack_count);
 out:
	preempt_enable();
}
1923
#ifdef UNUSED
/*
 * Dead code kept under #ifdef UNUSED.
 * NOTE(review): this passes a trace_array where ftrace_trace_userstack()
 * above now takes a ring_buffer — it would need updating before being
 * re-enabled.
 */
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
	ftrace_trace_userstack(tr, flags, preempt_count());
}
#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02001930
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001931#endif /* CONFIG_STACKTRACE */
1932
/* created for use with alloc_percpu */
struct trace_buffer_struct {
	char buffer[TRACE_BUF_SIZE];
};

/* Per-context scratch buffers for trace_printk(); see get_trace_buf() */
static struct trace_buffer_struct *trace_percpu_buffer;
static struct trace_buffer_struct *trace_percpu_sirq_buffer;
static struct trace_buffer_struct *trace_percpu_irq_buffer;
static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1942
/*
 * The buffer used is dependent on the context. There is a per cpu
 * buffer for normal context, softirq context, hard irq context and
 * for NMI context. This allows for lockless recording.
 *
 * Note, if the buffers failed to be allocated, then this returns NULL
 */
static char *get_trace_buf(void)
{
	struct trace_buffer_struct *percpu_buffer;

	/*
	 * If we have allocated per cpu buffers, then we do not
	 * need to do any locking.
	 */
	if (in_nmi())
		percpu_buffer = trace_percpu_nmi_buffer;
	else if (in_irq())
		percpu_buffer = trace_percpu_irq_buffer;
	else if (in_softirq())
		percpu_buffer = trace_percpu_sirq_buffer;
	else
		percpu_buffer = trace_percpu_buffer;

	if (!percpu_buffer)
		return NULL;

	return this_cpu_ptr(&percpu_buffer->buffer[0]);
}
1972
1973static int alloc_percpu_trace_buffer(void)
1974{
1975 struct trace_buffer_struct *buffers;
1976 struct trace_buffer_struct *sirq_buffers;
1977 struct trace_buffer_struct *irq_buffers;
1978 struct trace_buffer_struct *nmi_buffers;
1979
1980 buffers = alloc_percpu(struct trace_buffer_struct);
1981 if (!buffers)
1982 goto err_warn;
1983
1984 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1985 if (!sirq_buffers)
1986 goto err_sirq;
1987
1988 irq_buffers = alloc_percpu(struct trace_buffer_struct);
1989 if (!irq_buffers)
1990 goto err_irq;
1991
1992 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
1993 if (!nmi_buffers)
1994 goto err_nmi;
1995
1996 trace_percpu_buffer = buffers;
1997 trace_percpu_sirq_buffer = sirq_buffers;
1998 trace_percpu_irq_buffer = irq_buffers;
1999 trace_percpu_nmi_buffer = nmi_buffers;
2000
2001 return 0;
2002
2003 err_nmi:
2004 free_percpu(irq_buffers);
2005 err_irq:
2006 free_percpu(sirq_buffers);
2007 err_sirq:
2008 free_percpu(buffers);
2009 err_warn:
2010 WARN(1, "Could not allocate percpu trace_printk buffer");
2011 return -ENOMEM;
2012}
2013
Steven Rostedt81698832012-10-11 10:15:05 -04002014static int buffers_allocated;
2015
Steven Rostedt07d777f2011-09-22 14:01:55 -04002016void trace_printk_init_buffers(void)
2017{
Steven Rostedt07d777f2011-09-22 14:01:55 -04002018 if (buffers_allocated)
2019 return;
2020
2021 if (alloc_percpu_trace_buffer())
2022 return;
2023
Steven Rostedt2184db42014-05-28 13:14:40 -04002024 /* trace_printk() is for debug use only. Don't use it in production. */
2025
2026 pr_warning("\n**********************************************************\n");
2027 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2028 pr_warning("** **\n");
2029 pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
2030 pr_warning("** **\n");
2031 pr_warning("** This means that this is a DEBUG kernel and it is **\n");
2032 pr_warning("** unsafe for produciton use. **\n");
2033 pr_warning("** **\n");
2034 pr_warning("** If you see this message and you are not debugging **\n");
2035 pr_warning("** the kernel, report this immediately to your vendor! **\n");
2036 pr_warning("** **\n");
2037 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2038 pr_warning("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04002039
Steven Rostedtb382ede62012-10-10 21:44:34 -04002040 /* Expand the buffers to set size */
2041 tracing_update_buffers();
2042
Steven Rostedt07d777f2011-09-22 14:01:55 -04002043 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04002044
2045 /*
2046 * trace_printk_init_buffers() can be called by modules.
2047 * If that happens, then we need to start cmdline recording
2048 * directly here. If the global_trace.buffer is already
2049 * allocated here, then this was called by module code.
2050 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002051 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04002052 tracing_start_cmdline_record();
2053}
2054
2055void trace_printk_start_comm(void)
2056{
2057 /* Start tracing comms if trace printk is set */
2058 if (!buffers_allocated)
2059 return;
2060 tracing_start_cmdline_record();
2061}
2062
2063static void trace_printk_start_stop_comm(int enabled)
2064{
2065 if (!buffers_allocated)
2066 return;
2067
2068 if (enabled)
2069 tracing_start_cmdline_record();
2070 else
2071 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002072}
2073
/**
 * trace_vbprintk - write binary msg to tracing buffer
 * @ip:   call site instruction pointer recorded in the entry
 * @fmt:  format string; only the pointer is stored in the entry
 * @args: arguments, binary-encoded into the entry via vbin_printf()
 *
 * Returns the number of u32 words encoded, or 0 on failure.
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	struct ftrace_event_call *call = &event_bprint;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_array *tr = &global_trace;
	struct bprint_entry *entry;
	unsigned long flags;
	char *tbuffer;
	int len = 0, size, pc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	/* Per-context scratch buffer; NULL if allocation ever failed */
	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out;
	}

	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);

	/* Bail if the encoded arguments did not fit in the scratch buffer */
	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
		goto out;

	local_save_flags(flags);
	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->fmt = fmt;

	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(buffer, flags, 6, pc);
	}

out:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
2133
/*
 * __trace_array_vprintk - format and record a text trace_printk() entry
 * @buffer: ring buffer to record into
 * @ip:     call site instruction pointer
 * @fmt:    printf format string
 * @args:   format arguments
 *
 * Unlike trace_vbprintk(), the message is fully formatted into the
 * entry as a NUL-terminated string. Returns the formatted length,
 * or 0 if tracing is disabled or no scratch buffer is available.
 */
static int
__trace_array_vprintk(struct ring_buffer *buffer,
		      unsigned long ip, const char *fmt, va_list args)
{
	struct ftrace_event_call *call = &event_print;
	struct ring_buffer_event *event;
	int len = 0, size, pc;
	struct print_entry *entry;
	unsigned long flags;
	char *tbuffer;

	if (tracing_disabled || tracing_selftest_running)
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();


	/* Per-context scratch buffer; NULL if allocation ever failed */
	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out;
	}

	len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
	/* Bail if the formatted message did not fit in the scratch buffer */
	if (len > TRACE_BUF_SIZE)
		goto out;

	local_save_flags(flags);
	size = sizeof(*entry) + len + 1;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, tbuffer, len);
	entry->buf[len] = '\0';
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(buffer, flags, 6, pc);
	}
 out:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
Steven Rostedt659372d2009-09-03 19:11:07 -04002186
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002187int trace_array_vprintk(struct trace_array *tr,
2188 unsigned long ip, const char *fmt, va_list args)
2189{
2190 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2191}
2192
2193int trace_array_printk(struct trace_array *tr,
2194 unsigned long ip, const char *fmt, ...)
2195{
2196 int ret;
2197 va_list ap;
2198
2199 if (!(trace_flags & TRACE_ITER_PRINTK))
2200 return 0;
2201
2202 va_start(ap, fmt);
2203 ret = trace_array_vprintk(tr, ip, fmt, ap);
2204 va_end(ap);
2205 return ret;
2206}
2207
2208int trace_array_printk_buf(struct ring_buffer *buffer,
2209 unsigned long ip, const char *fmt, ...)
2210{
2211 int ret;
2212 va_list ap;
2213
2214 if (!(trace_flags & TRACE_ITER_PRINTK))
2215 return 0;
2216
2217 va_start(ap, fmt);
2218 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2219 va_end(ap);
2220 return ret;
2221}
2222
Steven Rostedt659372d2009-09-03 19:11:07 -04002223int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2224{
Steven Rostedta813a152009-10-09 01:41:35 -04002225 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04002226}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002227EXPORT_SYMBOL_GPL(trace_vprintk);
2228
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002229static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04002230{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002231 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2232
Steven Rostedt5a90f572008-09-03 17:42:51 -04002233 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002234 if (buf_iter)
2235 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04002236}
2237
Ingo Molnare309b412008-05-12 21:20:51 +02002238static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002239peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2240 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002241{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002242 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002243 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002244
Steven Rostedtd7690412008-10-01 00:29:53 -04002245 if (buf_iter)
2246 event = ring_buffer_iter_peek(buf_iter, ts);
2247 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002248 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002249 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04002250
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002251 if (event) {
2252 iter->ent_size = ring_buffer_event_length(event);
2253 return ring_buffer_event_data(event);
2254 }
2255 iter->ent_size = 0;
2256 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002257}
Steven Rostedtd7690412008-10-01 00:29:53 -04002258
/*
 * Find the next entry to display across the cpus covered by the iterator.
 *
 * For a per-cpu trace file, peek that cpu directly; otherwise scan every
 * tracing cpu and pick the entry with the smallest timestamp.  On return,
 * *ent_cpu, *ent_ts and *missing_events (each optional) describe the
 * chosen entry, and iter->ent_size is set to its size.  Returns the
 * entry, or NULL when all buffers are empty.
 */
static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
		  unsigned long *missing_events, u64 *ent_ts)
{
	struct ring_buffer *buffer = iter->trace_buffer->buffer;
	struct trace_entry *ent, *next = NULL;
	unsigned long lost_events = 0, next_lost = 0;
	int cpu_file = iter->cpu_file;
	u64 next_ts = 0, ts;
	int next_cpu = -1;
	int next_size = 0;
	int cpu;

	/*
	 * If we are in a per_cpu trace file, don't bother by iterating over
	 * all cpu and peek directly.
	 */
	if (cpu_file > RING_BUFFER_ALL_CPUS) {
		if (ring_buffer_empty_cpu(buffer, cpu_file))
			return NULL;
		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
		if (ent_cpu)
			*ent_cpu = cpu_file;

		return ent;
	}

	for_each_tracing_cpu(cpu) {

		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;

		ent = peek_next_entry(iter, cpu, &ts, &lost_events);

		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ts < next_ts)) {
			next = ent;
			next_cpu = cpu;
			next_ts = ts;
			next_lost = lost_events;
			/* peek_next_entry() set ent_size for this cpu */
			next_size = iter->ent_size;
		}
	}

	/* Restore the size of the winning entry (the loop clobbered it) */
	iter->ent_size = next_size;

	if (ent_cpu)
		*ent_cpu = next_cpu;

	if (ent_ts)
		*ent_ts = next_ts;

	if (missing_events)
		*missing_events = next_lost;

	return next;
}
2318
/* Find the next real entry, without updating the iterator itself */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts)
{
	/* Lost-event counts are not reported on this path (NULL arg) */
	return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
}
Ingo Molnar8c523a92008-05-12 21:20:46 +02002325
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002326/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05002327void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002328{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002329 iter->ent = __find_next_entry(iter, &iter->cpu,
2330 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02002331
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002332 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002333 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002334
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002335 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02002336}
2337
Ingo Molnare309b412008-05-12 21:20:51 +02002338static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002339{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002340 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002341 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002342}
2343
/*
 * seq_file ->next() callback: advance to the entry at position *pos.
 *
 * Walks the iterator forward with trace_find_next_entry_inc() until
 * iter->idx reaches the requested index; returns the iterator as the
 * opaque seq_file record, or NULL at end (or if asked to go backwards).
 */
static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	int i = (int)*pos;
	void *ent;

	/* Leftover handling is done in s_start(); it must be clear here */
	WARN_ON_ONCE(iter->leftover);

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	/* idx < 0 means we have not fetched the first entry yet */
	if (iter->idx < 0)
		ent = trace_find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = trace_find_next_entry_inc(iter);

	iter->pos = *pos;

	return ent;
}
2370
/*
 * tracing_iter_reset - rewind the per-cpu buffer iterator for @cpu
 *
 * Resets the buffer iterator to the start and skips over entries whose
 * timestamp precedes the buffer's time_start, counting them into the
 * per-cpu skipped_entries so statistics can ignore them.
 */
void tracing_iter_reset(struct trace_iterator *iter, int cpu)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter;
	unsigned long entries = 0;
	u64 ts;

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;

	buf_iter = trace_buffer_iter(iter, cpu);
	if (!buf_iter)
		return;

	ring_buffer_iter_reset(buf_iter);

	/*
	 * We could have the case with the max latency tracers
	 * that a reset never took place on a cpu. This is evident
	 * by the timestamp being before the start of the buffer.
	 */
	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
		if (ts >= iter->trace_buffer->time_start)
			break;
		entries++;
		ring_buffer_read(buf_iter, NULL);
	}

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
}
2400
/*
 * The current tracer is copied to avoid a global locking
 * all around.
 */
/*
 * seq_file ->start() callback for the trace file.
 *
 * Refreshes iter->trace from the trace array's current tracer (under
 * trace_types_lock), optionally disables cmdline recording, rewinds the
 * iterator when *pos moved unexpectedly, and takes the read/access
 * locks that s_stop() releases.  Returns the seq_file cursor, NULL at
 * end, or ERR_PTR(-EBUSY) when a max_tr tracer blocks snapshot reads.
 */
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	int cpu_file = iter->cpu_file;
	void *p = NULL;
	loff_t l = 0;
	int cpu;

	/*
	 * copy the tracer to avoid using a global lock all around.
	 * iter->trace is a copy of current_trace, the pointer to the
	 * name may be used instead of a strcmp(), as iter->trace->name
	 * will point to the same string as current_trace->name.
	 */
	mutex_lock(&trace_types_lock);
	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return ERR_PTR(-EBUSY);
#endif

	/* Snapshot reads must not disturb cmdline recording */
	if (!iter->snapshot)
		atomic_inc(&trace_record_cmdline_disabled);

	if (*pos != iter->pos) {
		/* Position moved under us: restart from scratch */
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		if (cpu_file == RING_BUFFER_ALL_CPUS) {
			for_each_tracing_cpu(cpu)
				tracing_iter_reset(iter, cpu);
		} else
			tracing_iter_reset(iter, cpu_file);

		iter->leftover = 0;
		/* Walk forward until we reach the requested position */
		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		/*
		 * If we overflowed the seq_file before, then we want
		 * to just reuse the trace_seq buffer again.
		 */
		if (iter->leftover)
			p = iter;
		else {
			l = *pos - 1;
			p = s_next(m, p, &l);
		}
	}

	trace_event_read_lock();
	trace_access_lock(cpu_file);
	return p;
}
2465
/*
 * seq_file ->stop() callback: undo what s_start() set up.
 *
 * Mirrors s_start(): re-enables cmdline recording (unless this is a
 * snapshot read) and drops the access/read locks.  Bails early in the
 * same max_tr-snapshot case where s_start() returned -EBUSY.
 */
static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return;
#endif

	if (!iter->snapshot)
		atomic_dec(&trace_record_cmdline_disabled);

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
}
2481
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002482static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002483get_total_entries(struct trace_buffer *buf,
2484 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002485{
2486 unsigned long count;
2487 int cpu;
2488
2489 *total = 0;
2490 *entries = 0;
2491
2492 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002493 count = ring_buffer_entries_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002494 /*
2495 * If this buffer has skipped entries, then we hold all
2496 * entries for the trace and we need to ignore the
2497 * ones before the time stamp.
2498 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002499 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2500 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002501 /* total is the same as the entries */
2502 *total += count;
2503 } else
2504 *total += count +
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002505 ring_buffer_overrun_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002506 *entries += count;
2507 }
2508}
2509
/* Print the column legend used by the latency trace output format */
static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "# _------=> CPU# \n");
	seq_puts(m, "# / _-----=> irqs-off \n");
	seq_puts(m, "# | / _----=> need-resched \n");
	seq_puts(m, "# || / _---=> hardirq/softirq \n");
	seq_puts(m, "# ||| / _--=> preempt-depth \n");
	seq_puts(m, "# |||| / delay \n");
	seq_puts(m, "# cmd pid ||||| time | caller \n");
	seq_puts(m, "# \\ / ||||| \\ | / \n");
}
2521
/* Print the entries-in-buffer / entries-written summary line */
static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
{
	unsigned long tot, ent;

	get_total_entries(buf, &tot, &ent);
	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
		   ent, tot, num_online_cpus());
	seq_puts(m, "#\n");
}
2532
/* Print the basic (no irq-info) column header for function traces */
static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
	seq_puts(m, "# | | | | |\n");
}
2539
/* Print the column header including the irq/preempt flag legend */
static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "# _-----=> irqs-off\n");
	seq_puts(m, "# / _----=> need-resched\n");
	seq_puts(m, "# | / _---=> hardirq/softirq\n");
	seq_puts(m, "# || / _--=> preempt-depth\n");
	seq_puts(m, "# ||| / delay\n");
	seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n");
	seq_puts(m, "# | | | |||| | |\n");
}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002551
Jiri Olsa62b915f2010-04-02 19:01:22 +02002552void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002553print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2554{
2555 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002556 struct trace_buffer *buf = iter->trace_buffer;
2557 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002558 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002559 unsigned long entries;
2560 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002561 const char *name = "preemption";
2562
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05002563 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002564
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002565 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002566
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002567 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002568 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002569 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002570 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002571 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002572 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02002573 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002574 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02002575 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002576 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002577#if defined(CONFIG_PREEMPT_NONE)
2578 "server",
2579#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2580 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04002581#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002582 "preempt",
2583#else
2584 "unknown",
2585#endif
2586 /* These are reserved for later use */
2587 0, 0, 0, 0);
2588#ifdef CONFIG_SMP
2589 seq_printf(m, " #P:%d)\n", num_online_cpus());
2590#else
2591 seq_puts(m, ")\n");
2592#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002593 seq_puts(m, "# -----------------\n");
2594 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002595 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07002596 data->comm, data->pid,
2597 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002598 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002599 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002600
2601 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002602 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002603 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2604 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002605 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002606 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2607 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04002608 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002609 }
2610
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002611 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002612}
2613
Steven Rostedta3097202008-11-07 22:36:02 -05002614static void test_cpu_buff_start(struct trace_iterator *iter)
2615{
2616 struct trace_seq *s = &iter->seq;
2617
Steven Rostedt12ef7d42008-11-12 17:52:38 -05002618 if (!(trace_flags & TRACE_ITER_ANNOTATE))
2619 return;
2620
2621 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2622 return;
2623
Rusty Russell44623442009-01-01 10:12:23 +10302624 if (cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05002625 return;
2626
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002627 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002628 return;
2629
Rusty Russell44623442009-01-01 10:12:23 +10302630 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02002631
2632 /* Don't print started cpu buffer for the first entry of the trace */
2633 if (iter->idx > 1)
2634 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2635 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05002636}
2637
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002638static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002639{
Steven Rostedt214023c2008-05-12 21:20:46 +02002640 struct trace_seq *s = &iter->seq;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002641 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002642 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002643 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002644
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002645 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002646
Steven Rostedta3097202008-11-07 22:36:02 -05002647 test_cpu_buff_start(iter);
2648
Steven Rostedtf633cef2008-12-23 23:24:13 -05002649 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002650
2651 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt27d48be2009-03-04 21:57:29 -05002652 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2653 if (!trace_print_lat_context(iter))
2654 goto partial;
2655 } else {
2656 if (!trace_print_context(iter))
2657 goto partial;
2658 }
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002659 }
2660
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002661 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04002662 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002663
2664 if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
2665 goto partial;
Steven Rostedt7104f302008-10-01 10:52:51 -04002666
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002667 return TRACE_TYPE_HANDLED;
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002668partial:
2669 return TRACE_TYPE_PARTIAL_LINE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002670}
2671
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002672static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002673{
2674 struct trace_seq *s = &iter->seq;
2675 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002676 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002677
2678 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002679
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002680 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002681 if (!trace_seq_printf(s, "%d %d %llu ",
2682 entry->pid, iter->cpu, iter->ts))
2683 goto partial;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002684 }
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002685
Steven Rostedtf633cef2008-12-23 23:24:13 -05002686 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002687 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04002688 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002689
2690 if (!trace_seq_printf(s, "%d ?\n", entry->type))
2691 goto partial;
Steven Rostedt7104f302008-10-01 10:52:51 -04002692
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002693 return TRACE_TYPE_HANDLED;
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002694partial:
2695 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002696}
2697
/*
 * Format iter->ent in hex style.
 *
 * NOTE: the SEQ_PUT_HEX_FIELD_RET/SEQ_PUT_FIELD_RET macros contain a
 * hidden "return TRACE_TYPE_PARTIAL_LINE" on seq-buffer overflow, so
 * this function can bail out mid-body.
 */
static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
		SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
		SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
	}

	event = ftrace_find_event(entry->type);
	if (event) {
		enum print_line_t ret = event->funcs->hex(iter, 0, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	SEQ_PUT_FIELD_RET(s, newline);

	return TRACE_TYPE_HANDLED;
}
2724
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002725static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002726{
2727 struct trace_seq *s = &iter->seq;
2728 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002729 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002730
2731 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002732
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002733 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2734 SEQ_PUT_FIELD_RET(s, entry->pid);
Steven Rostedt1830b52d2009-02-07 19:38:43 -05002735 SEQ_PUT_FIELD_RET(s, iter->cpu);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002736 SEQ_PUT_FIELD_RET(s, iter->ts);
2737 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002738
Steven Rostedtf633cef2008-12-23 23:24:13 -05002739 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04002740 return event ? event->funcs->binary(iter, 0, event) :
2741 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002742}
2743
/*
 * trace_empty - check whether the iterator has no entries left
 *
 * For a per-cpu trace file only that cpu is checked; otherwise every
 * tracing cpu must be empty.  Uses the per-cpu buffer iterator when one
 * exists, falling back to querying the ring buffer directly.  Returns
 * 1 when empty, 0 otherwise.
 */
int trace_empty(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter;
	int cpu;

	/* If we are looking at one CPU buffer, only check that one */
	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
		cpu = iter->cpu_file;
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
		return 1;
	}

	for_each_tracing_cpu(cpu) {
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
	}

	return 1;
}
2776
Lai Jiangshan4f535962009-05-18 19:35:34 +08002777/* Called with trace_event_read_lock() held. */
Jason Wessel955b61e2010-08-05 09:22:23 -05002778enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002779{
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002780 enum print_line_t ret;
2781
Jiri Olsaee5e51f2011-03-25 12:05:18 +01002782 if (iter->lost_events &&
2783 !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2784 iter->cpu, iter->lost_events))
2785 return TRACE_TYPE_PARTIAL_LINE;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002786
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002787 if (iter->trace && iter->trace->print_line) {
2788 ret = iter->trace->print_line(iter);
2789 if (ret != TRACE_TYPE_UNHANDLED)
2790 return ret;
2791 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02002792
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05002793 if (iter->ent->type == TRACE_BPUTS &&
2794 trace_flags & TRACE_ITER_PRINTK &&
2795 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2796 return trace_print_bputs_msg_only(iter);
2797
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002798 if (iter->ent->type == TRACE_BPRINT &&
2799 trace_flags & TRACE_ITER_PRINTK &&
2800 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04002801 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002802
Frederic Weisbecker66896a82008-12-13 20:18:13 +01002803 if (iter->ent->type == TRACE_PRINT &&
2804 trace_flags & TRACE_ITER_PRINTK &&
2805 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04002806 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01002807
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002808 if (trace_flags & TRACE_ITER_BIN)
2809 return print_bin_fmt(iter);
2810
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002811 if (trace_flags & TRACE_ITER_HEX)
2812 return print_hex_fmt(iter);
2813
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002814 if (trace_flags & TRACE_ITER_RAW)
2815 return print_raw_fmt(iter);
2816
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002817 return print_trace_fmt(iter);
2818}
2819
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01002820void trace_latency_header(struct seq_file *m)
2821{
2822 struct trace_iterator *iter = m->private;
2823
2824 /* print nothing if the buffers are empty */
2825 if (trace_empty(iter))
2826 return;
2827
2828 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2829 print_trace_header(m, iter);
2830
2831 if (!(trace_flags & TRACE_ITER_VERBOSE))
2832 print_lat_help_header(m);
2833}
2834
Jiri Olsa62b915f2010-04-02 19:01:22 +02002835void trace_default_header(struct seq_file *m)
2836{
2837 struct trace_iterator *iter = m->private;
2838
Jiri Olsaf56e7f82011-06-03 16:58:49 +02002839 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2840 return;
2841
Jiri Olsa62b915f2010-04-02 19:01:22 +02002842 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2843 /* print nothing if the buffers are empty */
2844 if (trace_empty(iter))
2845 return;
2846 print_trace_header(m, iter);
2847 if (!(trace_flags & TRACE_ITER_VERBOSE))
2848 print_lat_help_header(m);
2849 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05002850 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2851 if (trace_flags & TRACE_ITER_IRQ_INFO)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002852 print_func_help_header_irq(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002853 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002854 print_func_help_header(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002855 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02002856 }
2857}
2858
/*
 * Warn in the trace output when the function tracer has been killed
 * (ftrace_is_dead()); otherwise print nothing.
 */
static void test_ftrace_alive(struct seq_file *m)
{
	if (!ftrace_is_dead())
		return;
	/* Constant strings: seq_puts avoids pointless format parsing. */
	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
	seq_puts(m, "# MAY BE MISSING FUNCTION EVENTS\n");
}
2866
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002867#ifdef CONFIG_TRACER_MAX_TRACE
/* Usage text for the top-level "snapshot" file (all-CPU snapshot). */
static void show_snapshot_main_help(struct seq_file *m)
{
	/* Constant strings: seq_puts avoids pointless format parsing. */
	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
	seq_puts(m, "#                      Takes a snapshot of the main buffer.\n");
	seq_puts(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n");
	seq_puts(m, "#                      (Doesn't have to be '2' works with any number that\n");
	seq_puts(m, "#                       is not a '0' or '1')\n");
}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002877
/* Usage text for a per-CPU "snapshot" file. */
static void show_snapshot_percpu_help(struct seq_file *m)
{
	/* Constant strings: seq_puts avoids pointless format parsing. */
	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
	seq_puts(m, "#                      Takes a snapshot of the main buffer for this cpu.\n");
#else
	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
	seq_puts(m, "#                     Must use main snapshot file to allocate.\n");
#endif
	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
	seq_puts(m, "#                      (Doesn't have to be '2' works with any number that\n");
	seq_puts(m, "#                       is not a '0' or '1')\n");
}
2892
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002893static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2894{
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05002895 if (iter->tr->allocated_snapshot)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002896 seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
2897 else
2898 seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
2899
2900 seq_printf(m, "# Snapshot commands:\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002901 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2902 show_snapshot_main_help(m);
2903 else
2904 show_snapshot_percpu_help(m);
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002905}
2906#else
2907/* Should never be called */
2908static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2909#endif
2910
/*
 * seq_file .show callback for the "trace" file.
 *
 * Three cases, keyed off iterator state:
 *   - iter->ent == NULL: we are at the start; print the header lines
 *     (tracer name, liveness warning, snapshot help or default header).
 *   - iter->leftover: a previous ->show overflowed the seq_file buffer;
 *     replay the already-formatted line from iter->seq.
 *   - otherwise: format the next record into iter->seq and copy it out.
 *
 * Always returns 0; overflow is communicated through iter->leftover so
 * seq_file re-asks for the same data with a bigger buffer.
 */
static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
			test_ftrace_alive(m);
		}
		if (iter->snapshot && trace_empty(iter))
			print_snapshot_help(m, iter);
		else if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else
			trace_default_header(m);

	} else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;

	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * Use that instead.
		 * ret is 0 if seq_file write succeeded.
		 * -1 otherwise.
		 */
		iter->leftover = ret;
	}

	return 0;
}
2954
Oleg Nesterov649e9c72013-07-23 17:25:54 +02002955/*
2956 * Should be used after trace_array_get(), trace_types_lock
2957 * ensures that i_cdev was already initialized.
2958 */
2959static inline int tracing_get_cpu(struct inode *inode)
2960{
2961 if (inode->i_cdev) /* See trace_create_cpu_file() */
2962 return (long)inode->i_cdev - 1;
2963 return RING_BUFFER_ALL_CPUS;
2964}
2965
/* seq_file iteration ops for the "trace" file (see s_start/s_next/s_stop/s_show). */
static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};
2972
/*
 * Set up a trace_iterator for reading the "trace" (or "snapshot") file.
 *
 * Allocates the iterator via __seq_open_private(), snapshots the current
 * tracer into it, picks the buffer to read (max_buffer when @snapshot or
 * the tracer uses print_max), stops tracing for the duration of a normal
 * read, and primes per-CPU ring-buffer iterators for either the single
 * CPU encoded in the inode or all tracing CPUs.
 *
 * Returns the iterator, or ERR_PTR(-ENODEV/-ENOMEM) on failure.  On the
 * error paths everything allocated so far is torn down again; note that
 * trace_types_lock is held from the tracer copy through the ring-buffer
 * iterator setup.
 */
static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int cpu;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
	if (!iter)
		return ERR_PTR(-ENOMEM);

	/* One ring-buffer iterator slot per possible CPU. */
	iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
				    GFP_KERNEL);
	if (!iter->buffer_iter)
		goto release;

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	*iter->trace = *tr->current_trace;

	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
		goto fail;

	iter->tr = tr;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Currently only the top directory has a snapshot */
	if (tr->current_trace->print_max || snapshot)
		iter->trace_buffer = &tr->max_buffer;
	else
#endif
		iter->trace_buffer = &tr->trace_buffer;
	iter->snapshot = snapshot;
	iter->pos = -1;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* stop the trace while dumping if we are not opening "snapshot" */
	if (!iter->snapshot)
		tracing_stop_tr(tr);

	/* Prepare all iterators first, sync once, then start them. */
	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter->buffer_iter[cpu] =
				ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		}
		ring_buffer_read_prepare_sync();
		for_each_tracing_cpu(cpu) {
			ring_buffer_read_start(iter->buffer_iter[cpu]);
			tracing_iter_reset(iter, cpu);
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		ring_buffer_read_prepare_sync();
		ring_buffer_read_start(iter->buffer_iter[cpu]);
		tracing_iter_reset(iter, cpu);
	}

	mutex_unlock(&trace_types_lock);

	return iter;

 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
release:
	seq_release_private(inode, file);
	return ERR_PTR(-ENOMEM);
}
3067
3068int tracing_open_generic(struct inode *inode, struct file *filp)
3069{
Steven Rostedt60a11772008-05-12 21:20:44 +02003070 if (tracing_disabled)
3071 return -ENODEV;
3072
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003073 filp->private_data = inode->i_private;
3074 return 0;
3075}
3076
Geyslan G. Bem2e864212013-10-18 21:15:54 -03003077bool tracing_is_disabled(void)
3078{
3079 return (tracing_disabled) ? true: false;
3080}
3081
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003082/*
3083 * Open and update trace_array ref count.
3084 * Must have the current trace_array passed to it.
3085 */
Steven Rostedt (Red Hat)dcc30222013-07-02 20:30:52 -04003086static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003087{
3088 struct trace_array *tr = inode->i_private;
3089
3090 if (tracing_disabled)
3091 return -ENODEV;
3092
3093 if (trace_array_get(tr) < 0)
3094 return -ENODEV;
3095
3096 filp->private_data = inode->i_private;
3097
3098 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003099}
3100
/*
 * Release for the "trace" file: undo everything __tracing_open() set up.
 *
 * Write-only opens never created an iterator (writes do not use
 * seq_file), so they only drop the trace_array reference.  Read opens
 * finish all per-CPU ring-buffer iterators, notify the tracer, restart
 * tracing (unless this was the snapshot file), drop the array ref, and
 * free the iterator pieces.  trace_types_lock covers the buffer/tracer
 * teardown, matching the locking in __tracing_open().
 */
static int tracing_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m = file->private_data;
	struct trace_iterator *iter;
	int cpu;

	if (!(file->f_mode & FMODE_READ)) {
		trace_array_put(tr);
		return 0;
	}

	/* Writes do not use seq_file */
	iter = m->private;
	mutex_lock(&trace_types_lock);

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	if (!iter->snapshot)
		/* reenable tracing if it was previously enabled */
		tracing_start_tr(tr);

	/* Drop the ref under the lock; see trace_array_get(). */
	__trace_array_put(tr);

	mutex_unlock(&trace_types_lock);

	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
	seq_release_private(inode, file);

	return 0;
}
3141
/* Release for files opened via tracing_open_generic_tr(): drop the array ref. */
static int tracing_release_generic_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);
	return 0;
}
3149
/* Release for single_open()-style files that also hold a trace_array ref. */
static int tracing_single_release_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return single_release(inode, file);
}
3158
/*
 * Open handler for the "trace" file.
 *
 * Takes a trace_array reference for the lifetime of the open file.
 * Opening for write with O_TRUNC erases the buffer contents (all CPUs
 * or the single CPU encoded in the inode).  Opening for read builds the
 * full iterator via __tracing_open().  On any failure the reference is
 * dropped again.
 */
static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* If this file was open for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		int cpu = tracing_get_cpu(inode);

		if (cpu == RING_BUFFER_ALL_CPUS)
			tracing_reset_online_cpus(&tr->trace_buffer);
		else
			tracing_reset(&tr->trace_buffer, cpu);
	}

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, false);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}

	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
3191
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003192/*
3193 * Some tracers are not suitable for instance buffers.
3194 * A tracer is always available for the global array (toplevel)
3195 * or if it explicitly states that it is.
3196 */
3197static bool
3198trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3199{
3200 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3201}
3202
3203/* Find the next tracer that this trace array may use */
3204static struct tracer *
3205get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3206{
3207 while (t && !trace_ok_for_array(t, tr))
3208 t = t->next;
3209
3210 return t;
3211}
3212
Ingo Molnare309b412008-05-12 21:20:51 +02003213static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003214t_next(struct seq_file *m, void *v, loff_t *pos)
3215{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003216 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003217 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003218
3219 (*pos)++;
3220
3221 if (t)
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003222 t = get_tracer_for_array(tr, t->next);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003223
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003224 return t;
3225}
3226
3227static void *t_start(struct seq_file *m, loff_t *pos)
3228{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003229 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003230 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003231 loff_t l = 0;
3232
3233 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003234
3235 t = get_tracer_for_array(tr, trace_types);
3236 for (; t && l < *pos; t = t_next(m, t, &l))
3237 ;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003238
3239 return t;
3240}
3241
/* seq_file .stop for available_tracers: drop the lock taken in t_start(). */
static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}
3246
3247static int t_show(struct seq_file *m, void *v)
3248{
3249 struct tracer *t = v;
3250
3251 if (!t)
3252 return 0;
3253
3254 seq_printf(m, "%s", t->name);
3255 if (t->next)
3256 seq_putc(m, ' ');
3257 else
3258 seq_putc(m, '\n');
3259
3260 return 0;
3261}
3262
/* seq_file iteration ops for the "available_tracers" file. */
static const struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};
3269
3270static int show_traces_open(struct inode *inode, struct file *file)
3271{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003272 struct trace_array *tr = inode->i_private;
3273 struct seq_file *m;
3274 int ret;
3275
Steven Rostedt60a11772008-05-12 21:20:44 +02003276 if (tracing_disabled)
3277 return -ENODEV;
3278
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003279 ret = seq_open(file, &show_traces_seq_ops);
3280 if (ret)
3281 return ret;
3282
3283 m = file->private_data;
3284 m->private = tr;
3285
3286 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003287}
3288
/*
 * Writes to the "trace" file carry no data; opening it for write with
 * O_TRUNC already erased the buffers (see tracing_open()).  Claim the
 * whole count so userspace does not retry.
 */
static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}
3295
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003296loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08003297{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003298 int ret;
3299
Slava Pestov364829b2010-11-24 15:13:16 -08003300 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003301 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08003302 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003303 file->f_pos = ret = 0;
3304
3305 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08003306}
3307
/* File operations for the "trace" file. */
static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.write		= tracing_write_stub,
	.llseek		= tracing_lseek,
	.release	= tracing_release,
};
3315
/* File operations for the "available_tracers" file. */
static const struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
	.llseek		= seq_lseek,
};
3322
/*
 * The tracer itself will not take this lock, but still we want
 * to provide a consistent cpumask to user-space:
 */
static DEFINE_MUTEX(tracing_cpumask_update_lock);

/*
 * Temporary storage for the character representation of the
 * CPU bitmask (and one more byte for the newline):
 *
 * NOTE(review): this is a single file-scope buffer shared by all
 * readers of the tracing_cpumask file; it appears to rely entirely on
 * tracing_cpumask_update_lock for serialization — confirm no lock-free
 * path touches it.
 */
static char mask_str[NR_CPUS + 1];
3334
/*
 * Read handler for the "tracing_cpumask" file: format the array's CPU
 * mask into the shared mask_str buffer (under the update lock) and copy
 * it to userspace with a trailing newline.
 *
 * Note: "count" doubles as the return value; it is overwritten with
 * -EINVAL when the user buffer cannot hold the mask plus "\n\0".
 */
static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	int len;

	mutex_lock(&tracing_cpumask_update_lock);

	len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
	if (count - len < 2) {
		count = -EINVAL;
		goto out_err;
	}
	len += sprintf(mask_str + len, "\n");
	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);

out_err:
	mutex_unlock(&tracing_cpumask_update_lock);

	return count;
}
3357
/*
 * Write handler for the "tracing_cpumask" file: parse a new CPU mask
 * from userspace and apply it to the trace array.
 *
 * For every CPU whose bit flips, the per-CPU "disabled" counter and the
 * ring buffer's record-enable state are adjusted in step.  The whole
 * transition runs with IRQs off under tr->max_lock (an arch_spinlock)
 * so the mask, the counters, and the ring-buffer state change
 * atomically with respect to the tracer.
 */
static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	cpumask_var_t tracing_cpumask_new;
	int err, cpu;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	mutex_lock(&tracing_cpumask_update_lock);

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
		    !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
		    cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
		}
	}
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);

	mutex_unlock(&tracing_cpumask_update_lock);
	free_cpumask_var(tracing_cpumask_new);

	return count;

err_unlock:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}
3408
/* File operations for the "tracing_cpumask" file (holds a trace_array ref). */
static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
	.release	= tracing_release_generic_tr,
	.llseek		= generic_file_llseek,
};
3416
Li Zefanfdb372e2009-12-08 11:15:59 +08003417static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003418{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003419 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003420 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003421 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003422 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003423
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003424 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003425 tracer_flags = tr->current_trace->flags->val;
3426 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003427
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003428 for (i = 0; trace_options[i]; i++) {
3429 if (trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08003430 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003431 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003432 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003433 }
3434
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003435 for (i = 0; trace_opts[i].name; i++) {
3436 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08003437 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003438 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003439 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003440 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003441 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003442
Li Zefanfdb372e2009-12-08 11:15:59 +08003443 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003444}
3445
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003446static int __set_tracer_option(struct trace_array *tr,
Li Zefan8d18eaa2009-12-08 11:17:06 +08003447 struct tracer_flags *tracer_flags,
3448 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003449{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003450 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003451 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003452
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003453 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003454 if (ret)
3455 return ret;
3456
3457 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08003458 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003459 else
Zhaolei77708412009-08-07 18:53:21 +08003460 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003461 return 0;
3462}
3463
Li Zefan8d18eaa2009-12-08 11:17:06 +08003464/* Try to assign a tracer specific option */
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003465static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
Li Zefan8d18eaa2009-12-08 11:17:06 +08003466{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003467 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003468 struct tracer_flags *tracer_flags = trace->flags;
3469 struct tracer_opt *opts = NULL;
3470 int i;
3471
3472 for (i = 0; tracer_flags->opts[i].name; i++) {
3473 opts = &tracer_flags->opts[i];
3474
3475 if (strcmp(cmp, opts->name) == 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003476 return __set_tracer_option(tr, trace->flags, opts, neg);
Li Zefan8d18eaa2009-12-08 11:17:06 +08003477 }
3478
3479 return -EINVAL;
3480}
3481
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003482/* Some tracers require overwrite to stay enabled */
3483int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3484{
3485 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3486 return -1;
3487
3488 return 0;
3489}
3490
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003491int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003492{
3493 /* do nothing if flag is already set */
3494 if (!!(trace_flags & mask) == !!enabled)
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003495 return 0;
3496
3497 /* Give the tracer a chance to approve the change */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003498 if (tr->current_trace->flag_changed)
Steven Rostedt (Red Hat)bf6065b2014-01-10 17:51:01 -05003499 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003500 return -EINVAL;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003501
3502 if (enabled)
3503 trace_flags |= mask;
3504 else
3505 trace_flags &= ~mask;
Li Zefane870e9a2010-07-02 11:07:32 +08003506
3507 if (mask == TRACE_ITER_RECORD_CMD)
3508 trace_event_enable_cmd_record(enabled);
David Sharp750912f2010-12-08 13:46:47 -08003509
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003510 if (mask == TRACE_ITER_OVERWRITE) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003511 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003512#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003513 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003514#endif
3515 }
Steven Rostedt81698832012-10-11 10:15:05 -04003516
3517 if (mask == TRACE_ITER_PRINTK)
3518 trace_printk_start_stop_comm(enabled);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003519
3520 return 0;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003521}
3522
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003523static int trace_set_options(struct trace_array *tr, char *option)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003524{
Li Zefan8d18eaa2009-12-08 11:17:06 +08003525 char *cmp;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003526 int neg = 0;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003527 int ret = -ENODEV;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003528 int i;
3529
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003530 cmp = strstrip(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003531
Li Zefan8d18eaa2009-12-08 11:17:06 +08003532 if (strncmp(cmp, "no", 2) == 0) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003533 neg = 1;
3534 cmp += 2;
3535 }
3536
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003537 mutex_lock(&trace_types_lock);
3538
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003539 for (i = 0; trace_options[i]; i++) {
Li Zefan8d18eaa2009-12-08 11:17:06 +08003540 if (strcmp(cmp, trace_options[i]) == 0) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003541 ret = set_tracer_flag(tr, 1 << i, !neg);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003542 break;
3543 }
3544 }
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003545
3546 /* If no option could be set, test the specific tracer options */
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003547 if (!trace_options[i])
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003548 ret = set_tracer_option(tr, cmp, neg);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003549
3550 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003551
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003552 return ret;
3553}
3554
3555static ssize_t
3556tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3557 size_t cnt, loff_t *ppos)
3558{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003559 struct seq_file *m = filp->private_data;
3560 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003561 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003562 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003563
3564 if (cnt >= sizeof(buf))
3565 return -EINVAL;
3566
3567 if (copy_from_user(&buf, ubuf, cnt))
3568 return -EFAULT;
3569
Steven Rostedta8dd2172013-01-09 20:54:17 -05003570 buf[cnt] = 0;
3571
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003572 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003573 if (ret < 0)
3574 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003575
Jiri Olsacf8517c2009-10-23 19:36:16 -04003576 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003577
3578 return cnt;
3579}
3580
Li Zefanfdb372e2009-12-08 11:15:59 +08003581static int tracing_trace_options_open(struct inode *inode, struct file *file)
3582{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003583 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003584 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003585
Li Zefanfdb372e2009-12-08 11:15:59 +08003586 if (tracing_disabled)
3587 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003588
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003589 if (trace_array_get(tr) < 0)
3590 return -ENODEV;
3591
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003592 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3593 if (ret < 0)
3594 trace_array_put(tr);
3595
3596 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08003597}
3598
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003599static const struct file_operations tracing_iter_fops = {
Li Zefanfdb372e2009-12-08 11:15:59 +08003600 .open = tracing_trace_options_open,
3601 .read = seq_read,
3602 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003603 .release = tracing_single_release_tr,
Steven Rostedtee6bce52008-11-12 17:52:37 -05003604 .write = tracing_trace_options_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003605};
3606
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003607static const char readme_msg[] =
3608 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003609 "# echo 0 > tracing_on : quick way to disable tracing\n"
3610 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3611 " Important files:\n"
3612 " trace\t\t\t- The static contents of the buffer\n"
3613 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3614 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3615 " current_tracer\t- function and latency tracers\n"
3616 " available_tracers\t- list of configured tracers for current_tracer\n"
3617 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3618 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3619 " trace_clock\t\t-change the clock used to order events\n"
3620 " local: Per cpu clock but may not be synced across CPUs\n"
3621 " global: Synced across CPUs but slows tracing down.\n"
3622 " counter: Not a clock, but just an increment\n"
3623 " uptime: Jiffy counter from time of boot\n"
3624 " perf: Same clock that perf events use\n"
3625#ifdef CONFIG_X86_64
3626 " x86-tsc: TSC cycle counter\n"
3627#endif
3628 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3629 " tracing_cpumask\t- Limit which CPUs to trace\n"
3630 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3631 "\t\t\t Remove sub-buffer with rmdir\n"
3632 " trace_options\t\t- Set format or modify how tracing happens\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003633 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
3634 "\t\t\t option name\n"
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003635 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003636#ifdef CONFIG_DYNAMIC_FTRACE
3637 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003638 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3639 "\t\t\t functions\n"
3640 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3641 "\t modules: Can select a group via module\n"
3642 "\t Format: :mod:<module-name>\n"
3643 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3644 "\t triggers: a command to perform when function is hit\n"
3645 "\t Format: <function>:<trigger>[:count]\n"
3646 "\t trigger: traceon, traceoff\n"
3647 "\t\t enable_event:<system>:<event>\n"
3648 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003649#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003650 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003651#endif
3652#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003653 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003654#endif
Steven Rostedt (Red Hat)17a280e2014-04-10 22:43:37 -04003655 "\t\t dump\n"
3656 "\t\t cpudump\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003657 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3658 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3659 "\t The first one will disable tracing every time do_fault is hit\n"
3660 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3661 "\t The first time do trap is hit and it disables tracing, the\n"
3662 "\t counter will decrement to 2. If tracing is already disabled,\n"
3663 "\t the counter will not decrement. It only decrements when the\n"
3664 "\t trigger did work\n"
3665 "\t To remove trigger without count:\n"
3666 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3667 "\t To remove trigger with a count:\n"
3668 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003669 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003670 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3671 "\t modules: Can select a group via module command :mod:\n"
3672 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003673#endif /* CONFIG_DYNAMIC_FTRACE */
3674#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003675 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3676 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003677#endif
3678#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3679 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
Namhyung Kimd048a8c72014-06-13 01:23:53 +09003680 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003681 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3682#endif
3683#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003684 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3685 "\t\t\t snapshot buffer. Read the contents for more\n"
3686 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003687#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003688#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003689 " stack_trace\t\t- Shows the max stack trace when active\n"
3690 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003691 "\t\t\t Write into this file to reset the max size (trigger a\n"
3692 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003693#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003694 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3695 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003696#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003697#endif /* CONFIG_STACK_TRACER */
Tom Zanussi26f25562014-01-17 15:11:44 -06003698 " events/\t\t- Directory containing all trace event subsystems:\n"
3699 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3700 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003701 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3702 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003703 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003704 " events/<system>/<event>/\t- Directory containing control files for\n"
3705 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003706 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3707 " filter\t\t- If set, only events passing filter are traced\n"
3708 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003709 "\t Format: <trigger>[:count][if <filter>]\n"
3710 "\t trigger: traceon, traceoff\n"
3711 "\t enable_event:<system>:<event>\n"
3712 "\t disable_event:<system>:<event>\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003713#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003714 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003715#endif
3716#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003717 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003718#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003719 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3720 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3721 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3722 "\t events/block/block_unplug/trigger\n"
3723 "\t The first disables tracing every time block_unplug is hit.\n"
3724 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3725 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3726 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3727 "\t Like function triggers, the counter is only decremented if it\n"
3728 "\t enabled or disabled tracing.\n"
3729 "\t To remove a trigger without a count:\n"
3730 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3731 "\t To remove a trigger with a count:\n"
3732 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3733 "\t Filters can be ignored when removing a trigger.\n"
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003734;
3735
3736static ssize_t
3737tracing_readme_read(struct file *filp, char __user *ubuf,
3738 size_t cnt, loff_t *ppos)
3739{
3740 return simple_read_from_buffer(ubuf, cnt, ppos,
3741 readme_msg, strlen(readme_msg));
3742}
3743
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003744static const struct file_operations tracing_readme_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003745 .open = tracing_open_generic,
3746 .read = tracing_readme_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003747 .llseek = generic_file_llseek,
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003748};
3749
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003750static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
Avadh Patel69abe6a2009-04-10 16:04:48 -04003751{
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003752 unsigned int *ptr = v;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003753
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003754 if (*pos || m->count)
3755 ptr++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003756
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003757 (*pos)++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003758
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003759 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3760 ptr++) {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003761 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
Avadh Patel69abe6a2009-04-10 16:04:48 -04003762 continue;
3763
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003764 return ptr;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003765 }
3766
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003767 return NULL;
3768}
Avadh Patel69abe6a2009-04-10 16:04:48 -04003769
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003770static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3771{
3772 void *v;
3773 loff_t l = 0;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003774
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003775 preempt_disable();
3776 arch_spin_lock(&trace_cmdline_lock);
3777
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003778 v = &savedcmd->map_cmdline_to_pid[0];
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003779 while (l <= *pos) {
3780 v = saved_cmdlines_next(m, v, &l);
3781 if (!v)
3782 return NULL;
3783 }
3784
3785 return v;
3786}
3787
/* seq_file ->stop: drop the cmdline lock taken in saved_cmdlines_start(). */
static void saved_cmdlines_stop(struct seq_file *m, void *v)
{
	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}
3793
3794static int saved_cmdlines_show(struct seq_file *m, void *v)
3795{
3796 char buf[TASK_COMM_LEN];
3797 unsigned int *pid = v;
3798
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003799 __trace_find_cmdline(*pid, buf);
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003800 seq_printf(m, "%d %s\n", *pid, buf);
3801 return 0;
3802}
3803
/* seq_file iterator over the pid->comm map for "saved_cmdlines". */
static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
	.start		= saved_cmdlines_start,
	.next		= saved_cmdlines_next,
	.stop		= saved_cmdlines_stop,
	.show		= saved_cmdlines_show,
};
3810
3811static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3812{
3813 if (tracing_disabled)
3814 return -ENODEV;
3815
3816 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
Avadh Patel69abe6a2009-04-10 16:04:48 -04003817}
3818
/* Operations for the read-only "saved_cmdlines" file. */
static const struct file_operations tracing_saved_cmdlines_fops = {
	.open		= tracing_saved_cmdlines_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
3825
3826static ssize_t
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003827tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3828 size_t cnt, loff_t *ppos)
3829{
3830 char buf[64];
3831 int r;
3832
3833 arch_spin_lock(&trace_cmdline_lock);
Namhyung Kima6af8fb2014-06-10 16:11:35 +09003834 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003835 arch_spin_unlock(&trace_cmdline_lock);
3836
3837 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3838}
3839
/* Free a saved_cmdlines_buffer: both internal arrays, then the struct. */
static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
{
	kfree(s->saved_cmdlines);
	kfree(s->map_cmdline_to_pid);
	kfree(s);
}
3846
3847static int tracing_resize_saved_cmdlines(unsigned int val)
3848{
3849 struct saved_cmdlines_buffer *s, *savedcmd_temp;
3850
Namhyung Kima6af8fb2014-06-10 16:11:35 +09003851 s = kmalloc(sizeof(*s), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003852 if (!s)
3853 return -ENOMEM;
3854
3855 if (allocate_cmdlines_buffer(val, s) < 0) {
3856 kfree(s);
3857 return -ENOMEM;
3858 }
3859
3860 arch_spin_lock(&trace_cmdline_lock);
3861 savedcmd_temp = savedcmd;
3862 savedcmd = s;
3863 arch_spin_unlock(&trace_cmdline_lock);
3864 free_saved_cmdlines_buffer(savedcmd_temp);
3865
3866 return 0;
3867}
3868
3869static ssize_t
3870tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3871 size_t cnt, loff_t *ppos)
3872{
3873 unsigned long val;
3874 int ret;
3875
3876 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3877 if (ret)
3878 return ret;
3879
3880 /* must have at least 1 entry or less than PID_MAX_DEFAULT */
3881 if (!val || val > PID_MAX_DEFAULT)
3882 return -EINVAL;
3883
3884 ret = tracing_resize_saved_cmdlines((unsigned int)val);
3885 if (ret < 0)
3886 return ret;
3887
3888 *ppos += cnt;
3889
3890 return cnt;
3891}
3892
/*
 * Operations for the "saved_cmdlines_size" control file.
 * NOTE(review): no .llseek is set, unlike the sibling fops here — the
 * VFS default is used; confirm this is intentional.
 */
static const struct file_operations tracing_saved_cmdlines_size_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_saved_cmdlines_size_read,
	.write		= tracing_saved_cmdlines_size_write,
};
3898
3899static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003900tracing_set_trace_read(struct file *filp, char __user *ubuf,
3901 size_t cnt, loff_t *ppos)
3902{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003903 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08003904 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003905 int r;
3906
3907 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003908 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003909 mutex_unlock(&trace_types_lock);
3910
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003911 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003912}
3913
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02003914int tracer_init(struct tracer *t, struct trace_array *tr)
3915{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003916 tracing_reset_online_cpus(&tr->trace_buffer);
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02003917 return t->init(tr);
3918}
3919
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003920static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003921{
3922 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05003923
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003924 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003925 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003926}
3927
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003928#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookad60da502012-10-17 11:56:16 +09003929/* resize @tr's buffer to the size of @size_tr's entries */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003930static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
3931 struct trace_buffer *size_buf, int cpu_id)
Hiraku Toyookad60da502012-10-17 11:56:16 +09003932{
3933 int cpu, ret = 0;
3934
3935 if (cpu_id == RING_BUFFER_ALL_CPUS) {
3936 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003937 ret = ring_buffer_resize(trace_buf->buffer,
3938 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
Hiraku Toyookad60da502012-10-17 11:56:16 +09003939 if (ret < 0)
3940 break;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003941 per_cpu_ptr(trace_buf->data, cpu)->entries =
3942 per_cpu_ptr(size_buf->data, cpu)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09003943 }
3944 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003945 ret = ring_buffer_resize(trace_buf->buffer,
3946 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
Hiraku Toyookad60da502012-10-17 11:56:16 +09003947 if (ret == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003948 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
3949 per_cpu_ptr(size_buf->data, cpu_id)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09003950 }
3951
3952 return ret;
3953}
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003954#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09003955
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003956static int __tracing_resize_ring_buffer(struct trace_array *tr,
3957 unsigned long size, int cpu)
Steven Rostedt73c51622009-03-11 13:42:01 -04003958{
3959 int ret;
3960
3961 /*
3962 * If kernel or user changes the size of the ring buffer
Steven Rostedta123c522009-03-12 11:21:08 -04003963 * we use the size that was given, and we can forget about
3964 * expanding it later.
Steven Rostedt73c51622009-03-11 13:42:01 -04003965 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05003966 ring_buffer_expanded = true;
Steven Rostedt73c51622009-03-11 13:42:01 -04003967
Steven Rostedtb382ede62012-10-10 21:44:34 -04003968 /* May be called before buffers are initialized */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003969 if (!tr->trace_buffer.buffer)
Steven Rostedtb382ede62012-10-10 21:44:34 -04003970 return 0;
3971
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003972 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003973 if (ret < 0)
3974 return ret;
3975
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003976#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003977 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
3978 !tr->current_trace->use_max_tr)
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003979 goto out;
3980
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003981 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003982 if (ret < 0) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003983 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
3984 &tr->trace_buffer, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003985 if (r < 0) {
Steven Rostedta123c522009-03-12 11:21:08 -04003986 /*
3987 * AARGH! We are left with different
3988 * size max buffer!!!!
3989 * The max buffer is our "snapshot" buffer.
3990 * When a tracer needs a snapshot (one of the
3991 * latency tracers), it swaps the max buffer
3992 * with the saved snap shot. We succeeded to
3993 * update the size of the main buffer, but failed to
3994 * update the size of the max buffer. But when we tried
3995 * to reset the main buffer to the original size, we
3996 * failed there too. This is very unlikely to
3997 * happen, but if it does, warn and kill all
3998 * tracing.
3999 */
Steven Rostedt73c51622009-03-11 13:42:01 -04004000 WARN_ON(1);
4001 tracing_disabled = 1;
4002 }
4003 return ret;
4004 }
4005
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004006 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004007 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004008 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004009 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004010
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004011 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004012#endif /* CONFIG_TRACER_MAX_TRACE */
4013
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004014 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004015 set_buffer_entries(&tr->trace_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004016 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004017 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04004018
4019 return ret;
4020}
4021
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004022static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4023 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004024{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07004025 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004026
4027 mutex_lock(&trace_types_lock);
4028
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004029 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4030 /* make sure, this cpu is enabled in the mask */
4031 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4032 ret = -EINVAL;
4033 goto out;
4034 }
4035 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004036
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004037 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004038 if (ret < 0)
4039 ret = -ENOMEM;
4040
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004041out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004042 mutex_unlock(&trace_types_lock);
4043
4044 return ret;
4045}
4046
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004047
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004048/**
4049 * tracing_update_buffers - used by tracing facility to expand ring buffers
4050 *
4051 * To save on memory when the tracing is never used on a system with it
4052 * configured in. The ring buffers are set to a minimum size. But once
4053 * a user starts to use the tracing facility, then they need to grow
4054 * to their default size.
4055 *
4056 * This function is to be called when a tracer is about to be used.
4057 */
4058int tracing_update_buffers(void)
4059{
4060 int ret = 0;
4061
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004062 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004063 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004064 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004065 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004066 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004067
4068 return ret;
4069}
4070
Steven Rostedt577b7852009-02-26 23:43:05 -05004071struct trace_option_dentry;
4072
4073static struct trace_option_dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004074create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05004075
4076static void
4077destroy_trace_option_files(struct trace_option_dentry *topts);
4078
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05004079/*
4080 * Used to clear out the tracer before deletion of an instance.
4081 * Must have trace_types_lock held.
4082 */
4083static void tracing_set_nop(struct trace_array *tr)
4084{
4085 if (tr->current_trace == &nop_trace)
4086 return;
4087
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004088 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05004089
4090 if (tr->current_trace->reset)
4091 tr->current_trace->reset(tr);
4092
4093 tr->current_trace = &nop_trace;
4094}
4095
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004096static int tracing_set_tracer(struct trace_array *tr, const char *buf)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004097{
Steven Rostedt577b7852009-02-26 23:43:05 -05004098 static struct trace_option_dentry *topts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004099 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004100#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05004101 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004102#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01004103 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004104
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004105 mutex_lock(&trace_types_lock);
4106
Steven Rostedt73c51622009-03-11 13:42:01 -04004107 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004108 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004109 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04004110 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01004111 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04004112 ret = 0;
4113 }
4114
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004115 for (t = trace_types; t; t = t->next) {
4116 if (strcmp(t->name, buf) == 0)
4117 break;
4118 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004119 if (!t) {
4120 ret = -EINVAL;
4121 goto out;
4122 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004123 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004124 goto out;
4125
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004126 /* Some tracers are only allowed for the top level buffer */
4127 if (!trace_ok_for_array(t, tr)) {
4128 ret = -EINVAL;
4129 goto out;
4130 }
4131
Steven Rostedt9f029e82008-11-12 15:24:24 -05004132 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004133
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004134 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004135
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004136 if (tr->current_trace->reset)
4137 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05004138
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004139 /* Current trace needs to be nop_trace before synchronize_sched */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004140 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05004141
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05004142#ifdef CONFIG_TRACER_MAX_TRACE
4143 had_max_tr = tr->allocated_snapshot;
Steven Rostedt34600f02013-01-22 13:35:11 -05004144
4145 if (had_max_tr && !t->use_max_tr) {
4146 /*
4147 * We need to make sure that the update_max_tr sees that
4148 * current_trace changed to nop_trace to keep it from
4149 * swapping the buffers after we resize it.
4150 * The update_max_tr is called from interrupts disabled
4151 * so a synchronized_sched() is sufficient.
4152 */
4153 synchronize_sched();
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004154 free_snapshot(tr);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004155 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004156#endif
Steven Rostedt (Red Hat)f1b21c92014-01-14 12:33:33 -05004157 /* Currently, only the top instance has options */
4158 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
4159 destroy_trace_option_files(topts);
4160 topts = create_trace_option_files(tr, t);
4161 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004162
4163#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05004164 if (t->use_max_tr && !had_max_tr) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004165 ret = alloc_snapshot(tr);
Hiraku Toyookad60da502012-10-17 11:56:16 +09004166 if (ret < 0)
4167 goto out;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004168 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004169#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05004170
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004171 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004172 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004173 if (ret)
4174 goto out;
4175 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004176
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004177 tr->current_trace = t;
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004178 tr->current_trace->enabled++;
Steven Rostedt9f029e82008-11-12 15:24:24 -05004179 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004180 out:
4181 mutex_unlock(&trace_types_lock);
4182
Peter Zijlstrad9e54072008-11-01 19:57:37 +01004183 return ret;
4184}
4185
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004186static ssize_t
4187tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4188 size_t cnt, loff_t *ppos)
4189{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004190 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08004191 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004192 int i;
4193 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004194 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004195
Steven Rostedt60063a62008-10-28 10:44:24 -04004196 ret = cnt;
4197
Li Zefanee6c2c12009-09-18 14:06:47 +08004198 if (cnt > MAX_TRACER_SIZE)
4199 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004200
4201 if (copy_from_user(&buf, ubuf, cnt))
4202 return -EFAULT;
4203
4204 buf[cnt] = 0;
4205
4206 /* strip ending whitespace. */
4207 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4208 buf[i] = 0;
4209
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004210 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004211 if (err)
4212 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004213
Jiri Olsacf8517c2009-10-23 19:36:16 -04004214 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004215
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004216 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004217}
4218
4219static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004220tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4221 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004222{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004223 char buf[64];
4224 int r;
4225
Steven Rostedtcffae432008-05-12 21:21:00 +02004226 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004227 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02004228 if (r > sizeof(buf))
4229 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004230 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004231}
4232
4233static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004234tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4235 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004236{
Hannes Eder5e398412009-02-10 19:44:34 +01004237 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004238 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004239
Peter Huewe22fe9b52011-06-07 21:58:27 +02004240 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4241 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004242 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004243
4244 *ptr = val * 1000;
4245
4246 return cnt;
4247}
4248
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004249static ssize_t
4250tracing_thresh_read(struct file *filp, char __user *ubuf,
4251 size_t cnt, loff_t *ppos)
4252{
4253 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4254}
4255
4256static ssize_t
4257tracing_thresh_write(struct file *filp, const char __user *ubuf,
4258 size_t cnt, loff_t *ppos)
4259{
4260 struct trace_array *tr = filp->private_data;
4261 int ret;
4262
4263 mutex_lock(&trace_types_lock);
4264 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4265 if (ret < 0)
4266 goto out;
4267
4268 if (tr->current_trace->update_thresh) {
4269 ret = tr->current_trace->update_thresh(tr);
4270 if (ret < 0)
4271 goto out;
4272 }
4273
4274 ret = cnt;
4275out:
4276 mutex_unlock(&trace_types_lock);
4277
4278 return ret;
4279}
4280
4281static ssize_t
4282tracing_max_lat_read(struct file *filp, char __user *ubuf,
4283 size_t cnt, loff_t *ppos)
4284{
4285 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4286}
4287
4288static ssize_t
4289tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4290 size_t cnt, loff_t *ppos)
4291{
4292 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
4293}
4294
Steven Rostedtb3806b42008-05-12 21:20:46 +02004295static int tracing_open_pipe(struct inode *inode, struct file *filp)
4296{
Oleg Nesterov15544202013-07-23 17:25:57 +02004297 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004298 struct trace_iterator *iter;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004299 int ret = 0;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004300
4301 if (tracing_disabled)
4302 return -ENODEV;
4303
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004304 if (trace_array_get(tr) < 0)
4305 return -ENODEV;
4306
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004307 mutex_lock(&trace_types_lock);
4308
Steven Rostedtb3806b42008-05-12 21:20:46 +02004309 /* create a buffer to store the information to pass to userspace */
4310 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004311 if (!iter) {
4312 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004313 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004314 goto out;
4315 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004316
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004317 /*
4318 * We make a copy of the current tracer to avoid concurrent
4319 * changes on it while we are reading.
4320 */
4321 iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
4322 if (!iter->trace) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004323 ret = -ENOMEM;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004324 goto fail;
4325 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004326 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004327
4328 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4329 ret = -ENOMEM;
4330 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10304331 }
4332
Steven Rostedta3097202008-11-07 22:36:02 -05004333 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10304334 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05004335
Steven Rostedt112f38a72009-06-01 15:16:05 -04004336 if (trace_flags & TRACE_ITER_LATENCY_FMT)
4337 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4338
David Sharp8be07092012-11-13 12:18:22 -08004339 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09004340 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08004341 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4342
Oleg Nesterov15544202013-07-23 17:25:57 +02004343 iter->tr = tr;
4344 iter->trace_buffer = &tr->trace_buffer;
4345 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004346 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004347 filp->private_data = iter;
4348
Steven Rostedt107bad82008-05-12 21:21:01 +02004349 if (iter->trace->pipe_open)
4350 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02004351
Arnd Bergmannb4447862010-07-07 23:40:11 +02004352 nonseekable_open(inode, filp);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004353out:
4354 mutex_unlock(&trace_types_lock);
4355 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004356
4357fail:
4358 kfree(iter->trace);
4359 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004360 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004361 mutex_unlock(&trace_types_lock);
4362 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004363}
4364
/*
 * Release callback for trace_pipe: undo tracing_open_pipe().  Notifies
 * the (copied) tracer, frees the iterator and its resources, and drops
 * the trace array reference taken at open time.
 */
static int tracing_release_pipe(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter = file->private_data;
	struct trace_array *tr = inode->i_private;

	mutex_lock(&trace_types_lock);

	/* Let the tracer clean up any per-iterator state it set up. */
	if (iter->trace->pipe_close)
		iter->trace->pipe_close(iter);

	mutex_unlock(&trace_types_lock);

	/* Free everything tracing_open_pipe() allocated. */
	free_cpumask_var(iter->started);
	mutex_destroy(&iter->mutex);
	kfree(iter->trace);
	kfree(iter);

	trace_array_put(tr);

	return 0;
}
4386
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004387static unsigned int
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004388trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004389{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004390 /* Iterators are static, they should be filled or empty */
4391 if (trace_buffer_iter(iter, iter->cpu_file))
4392 return POLLIN | POLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004393
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004394 if (trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004395 /*
4396 * Always select as readable when in blocking mode
4397 */
4398 return POLLIN | POLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004399 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004400 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004401 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004402}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004403
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004404static unsigned int
4405tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4406{
4407 struct trace_iterator *iter = filp->private_data;
4408
4409 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004410}
4411
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004412/* Must be called with trace_types_lock mutex held. */
4413static int tracing_wait_pipe(struct file *filp)
4414{
4415 struct trace_iterator *iter = filp->private_data;
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04004416 int ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004417
4418 while (trace_empty(iter)) {
4419
4420 if ((filp->f_flags & O_NONBLOCK)) {
4421 return -EAGAIN;
4422 }
4423
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004424 /*
Liu Bo250bfd32013-01-14 10:54:11 +08004425 * We block until we read something and tracing is disabled.
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004426 * We still block if tracing is disabled, but we have never
4427 * read anything. This allows a user to cat this file, and
4428 * then enable tracing. But after we have read something,
4429 * we give an EOF when tracing is again disabled.
4430 *
4431 * iter->pos will be 0 if we haven't read anything.
4432 */
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04004433 if (!tracing_is_on() && iter->pos)
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004434 break;
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04004435
4436 mutex_unlock(&iter->mutex);
4437
Rabin Vincente30f53a2014-11-10 19:46:34 +01004438 ret = wait_on_pipe(iter, false);
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04004439
4440 mutex_lock(&iter->mutex);
4441
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04004442 if (ret)
4443 return ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004444 }
4445
4446 return 1;
4447}
4448
Steven Rostedtb3806b42008-05-12 21:20:46 +02004449/*
4450 * Consumer reader.
4451 */
4452static ssize_t
4453tracing_read_pipe(struct file *filp, char __user *ubuf,
4454 size_t cnt, loff_t *ppos)
4455{
4456 struct trace_iterator *iter = filp->private_data;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004457 struct trace_array *tr = iter->tr;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004458 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004459
4460 /* return any leftover data */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004461 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4462 if (sret != -EBUSY)
4463 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004464
Steven Rostedtf9520752009-03-02 14:04:40 -05004465 trace_seq_init(&iter->seq);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004466
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004467 /* copy the tracer to avoid using a global lock all around */
Steven Rostedt107bad82008-05-12 21:21:01 +02004468 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004469 if (unlikely(iter->trace->name != tr->current_trace->name))
4470 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004471 mutex_unlock(&trace_types_lock);
4472
4473 /*
4474 * Avoid more than one consumer on a single file descriptor
4475 * This is just a matter of traces coherency, the ring buffer itself
4476 * is protected.
4477 */
4478 mutex_lock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02004479 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004480 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4481 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02004482 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02004483 }
4484
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004485waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004486 sret = tracing_wait_pipe(filp);
4487 if (sret <= 0)
4488 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004489
4490 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004491 if (trace_empty(iter)) {
4492 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02004493 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004494 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004495
4496 if (cnt >= PAGE_SIZE)
4497 cnt = PAGE_SIZE - 1;
4498
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004499 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004500 memset(&iter->seq, 0,
4501 sizeof(struct trace_iterator) -
4502 offsetof(struct trace_iterator, seq));
Andrew Vagined5467d2013-08-02 21:16:43 +04004503 cpumask_clear(iter->started);
Steven Rostedt4823ed72008-05-12 21:21:01 +02004504 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004505
Lai Jiangshan4f535962009-05-18 19:35:34 +08004506 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004507 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05004508 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004509 enum print_line_t ret;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004510 int len = iter->seq.len;
4511
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004512 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004513 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02004514 /* don't print partial lines */
4515 iter->seq.len = len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004516 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004517 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01004518 if (ret != TRACE_TYPE_NO_CONSUME)
4519 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004520
4521 if (iter->seq.len >= cnt)
4522 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01004523
4524 /*
4525 * Setting the full flag means we reached the trace_seq buffer
4526 * size and we should leave by partial output condition above.
4527 * One of the trace_seq_* functions is not used properly.
4528 */
4529 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4530 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004531 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004532 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004533 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02004534
Steven Rostedtb3806b42008-05-12 21:20:46 +02004535 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004536 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4537 if (iter->seq.readpos >= iter->seq.len)
Steven Rostedtf9520752009-03-02 14:04:40 -05004538 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004539
4540 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004541 * If there was nothing to send to user, in spite of consuming trace
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004542 * entries, go back to wait for more entries.
4543 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004544 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004545 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004546
Steven Rostedt107bad82008-05-12 21:21:01 +02004547out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004548 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02004549
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004550 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004551}
4552
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004553static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4554 unsigned int idx)
4555{
4556 __free_page(spd->pages[idx]);
4557}
4558
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08004559static const struct pipe_buf_operations tracing_pipe_buf_ops = {
Steven Rostedt34cd4992009-02-09 12:06:29 -05004560 .can_merge = 0,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004561 .confirm = generic_pipe_buf_confirm,
Al Viro92fdd982014-01-17 07:53:39 -05004562 .release = generic_pipe_buf_release,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004563 .steal = generic_pipe_buf_steal,
4564 .get = generic_pipe_buf_get,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004565};
4566
Steven Rostedt34cd4992009-02-09 12:06:29 -05004567static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01004568tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004569{
4570 size_t count;
4571 int ret;
4572
4573 /* Seq buffer is page-sized, exactly what we need. */
4574 for (;;) {
4575 count = iter->seq.len;
4576 ret = print_trace_line(iter);
4577 count = iter->seq.len - count;
4578 if (rem < count) {
4579 rem = 0;
4580 iter->seq.len -= count;
4581 break;
4582 }
4583 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4584 iter->seq.len -= count;
4585 break;
4586 }
4587
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08004588 if (ret != TRACE_TYPE_NO_CONSUME)
4589 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05004590 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05004591 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05004592 rem = 0;
4593 iter->ent = NULL;
4594 break;
4595 }
4596 }
4597
4598 return rem;
4599}
4600
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004601static ssize_t tracing_splice_read_pipe(struct file *filp,
4602 loff_t *ppos,
4603 struct pipe_inode_info *pipe,
4604 size_t len,
4605 unsigned int flags)
4606{
Jens Axboe35f3d142010-05-20 10:43:18 +02004607 struct page *pages_def[PIPE_DEF_BUFFERS];
4608 struct partial_page partial_def[PIPE_DEF_BUFFERS];
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004609 struct trace_iterator *iter = filp->private_data;
4610 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02004611 .pages = pages_def,
4612 .partial = partial_def,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004613 .nr_pages = 0, /* This gets updated below. */
Eric Dumazet047fe362012-06-12 15:24:40 +02004614 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004615 .flags = flags,
4616 .ops = &tracing_pipe_buf_ops,
4617 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004618 };
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004619 struct trace_array *tr = iter->tr;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004620 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004621 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004622 unsigned int i;
4623
Jens Axboe35f3d142010-05-20 10:43:18 +02004624 if (splice_grow_spd(pipe, &spd))
4625 return -ENOMEM;
4626
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004627 /* copy the tracer to avoid using a global lock all around */
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004628 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004629 if (unlikely(iter->trace->name != tr->current_trace->name))
4630 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004631 mutex_unlock(&trace_types_lock);
4632
4633 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004634
4635 if (iter->trace->splice_read) {
4636 ret = iter->trace->splice_read(iter, filp,
4637 ppos, pipe, len, flags);
4638 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004639 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004640 }
4641
4642 ret = tracing_wait_pipe(filp);
4643 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004644 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004645
Jason Wessel955b61e2010-08-05 09:22:23 -05004646 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004647 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004648 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004649 }
4650
Lai Jiangshan4f535962009-05-18 19:35:34 +08004651 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004652 trace_access_lock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004653
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004654 /* Fill as many pages as possible. */
Al Viroa786c062014-04-11 12:01:03 -04004655 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
Jens Axboe35f3d142010-05-20 10:43:18 +02004656 spd.pages[i] = alloc_page(GFP_KERNEL);
4657 if (!spd.pages[i])
Steven Rostedt34cd4992009-02-09 12:06:29 -05004658 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004659
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01004660 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004661
4662 /* Copy the data into the page, so we can start over. */
4663 ret = trace_seq_to_buffer(&iter->seq,
Jens Axboe35f3d142010-05-20 10:43:18 +02004664 page_address(spd.pages[i]),
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004665 iter->seq.len);
4666 if (ret < 0) {
Jens Axboe35f3d142010-05-20 10:43:18 +02004667 __free_page(spd.pages[i]);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004668 break;
4669 }
Jens Axboe35f3d142010-05-20 10:43:18 +02004670 spd.partial[i].offset = 0;
4671 spd.partial[i].len = iter->seq.len;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004672
Steven Rostedtf9520752009-03-02 14:04:40 -05004673 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004674 }
4675
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004676 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004677 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004678 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004679
4680 spd.nr_pages = i;
4681
Jens Axboe35f3d142010-05-20 10:43:18 +02004682 ret = splice_to_pipe(pipe, &spd);
4683out:
Eric Dumazet047fe362012-06-12 15:24:40 +02004684 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02004685 return ret;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004686
Steven Rostedt34cd4992009-02-09 12:06:29 -05004687out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004688 mutex_unlock(&iter->mutex);
Jens Axboe35f3d142010-05-20 10:43:18 +02004689 goto out;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004690}
4691
Steven Rostedta98a3c32008-05-12 21:20:59 +02004692static ssize_t
4693tracing_entries_read(struct file *filp, char __user *ubuf,
4694 size_t cnt, loff_t *ppos)
4695{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004696 struct inode *inode = file_inode(filp);
4697 struct trace_array *tr = inode->i_private;
4698 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004699 char buf[64];
4700 int r = 0;
4701 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004702
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004703 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004704
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004705 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004706 int cpu, buf_size_same;
4707 unsigned long size;
4708
4709 size = 0;
4710 buf_size_same = 1;
4711 /* check if all cpu sizes are same */
4712 for_each_tracing_cpu(cpu) {
4713 /* fill in the size from first enabled cpu */
4714 if (size == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004715 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4716 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004717 buf_size_same = 0;
4718 break;
4719 }
4720 }
4721
4722 if (buf_size_same) {
4723 if (!ring_buffer_expanded)
4724 r = sprintf(buf, "%lu (expanded: %lu)\n",
4725 size >> 10,
4726 trace_buf_size >> 10);
4727 else
4728 r = sprintf(buf, "%lu\n", size >> 10);
4729 } else
4730 r = sprintf(buf, "X\n");
4731 } else
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004732 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004733
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004734 mutex_unlock(&trace_types_lock);
4735
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004736 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4737 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004738}
4739
4740static ssize_t
4741tracing_entries_write(struct file *filp, const char __user *ubuf,
4742 size_t cnt, loff_t *ppos)
4743{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004744 struct inode *inode = file_inode(filp);
4745 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004746 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004747 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004748
Peter Huewe22fe9b52011-06-07 21:58:27 +02004749 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4750 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004751 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004752
4753 /* must have at least 1 entry */
4754 if (!val)
4755 return -EINVAL;
4756
Steven Rostedt1696b2b2008-11-13 00:09:35 -05004757 /* value is in KB */
4758 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004759 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004760 if (ret < 0)
4761 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004762
Jiri Olsacf8517c2009-10-23 19:36:16 -04004763 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004764
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004765 return cnt;
4766}
Steven Rostedtbf5e6512008-11-10 21:46:00 -05004767
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004768static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004769tracing_total_entries_read(struct file *filp, char __user *ubuf,
4770 size_t cnt, loff_t *ppos)
4771{
4772 struct trace_array *tr = filp->private_data;
4773 char buf[64];
4774 int r, cpu;
4775 unsigned long size = 0, expanded_size = 0;
4776
4777 mutex_lock(&trace_types_lock);
4778 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004779 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004780 if (!ring_buffer_expanded)
4781 expanded_size += trace_buf_size >> 10;
4782 }
4783 if (ring_buffer_expanded)
4784 r = sprintf(buf, "%lu\n", size);
4785 else
4786 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
4787 mutex_unlock(&trace_types_lock);
4788
4789 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4790}
4791
4792static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004793tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
4794 size_t cnt, loff_t *ppos)
4795{
4796 /*
4797 * There is no need to read what the user has written, this function
4798 * is just to make sure that there is no error when "echo" is used
4799 */
4800
4801 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004802
4803 return cnt;
4804}
4805
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004806static int
4807tracing_free_buffer_release(struct inode *inode, struct file *filp)
4808{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004809 struct trace_array *tr = inode->i_private;
4810
Steven Rostedtcf30cf62011-06-14 22:44:07 -04004811 /* disable tracing ? */
4812 if (trace_flags & TRACE_ITER_STOP_ON_FREE)
Alexander Z Lam711e1242013-08-02 18:36:15 -07004813 tracer_tracing_off(tr);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004814 /* resize the ring buffer to 0 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004815 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004816
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004817 trace_array_put(tr);
4818
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004819 return 0;
4820}
4821
/*
 * Write handler for trace_marker: inject a userspace string into the
 * ring buffer as a TRACE_PRINT event.  The user pages are pinned and
 * mapped so the data can be copied straight into the reserved event
 * with no intermediate kernel buffer.  Returns the number of bytes
 * consumed or a negative errno.
 */
static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
					size_t cnt, loff_t *fpos)
{
	unsigned long addr = (unsigned long)ubuf;
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	struct page *pages[2];
	void *map_page[2];
	int nr_pages = 1;
	ssize_t written;
	int offset;
	int size;
	int len;
	int ret;
	int i;

	if (tracing_disabled)
		return -EINVAL;

	/* Writing markers can be disabled via the "markers" trace option */
	if (!(trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	/* Silently truncate over-long writes to the event buffer size */
	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	/*
	 * Userspace is injecting traces into the kernel trace buffer.
	 * We want to be as non intrusive as possible.
	 * To do so, we do not want to allocate any special buffers
	 * or take any locks, but instead write the userspace data
	 * straight into the ring buffer.
	 *
	 * First we need to pin the userspace buffer into memory,
	 * which, most likely it is, because it just referenced it.
	 * But there's no guarantee that it is. By using get_user_pages_fast()
	 * and kmap_atomic/kunmap_atomic() we can get access to the
	 * pages directly. We then write the data directly into the
	 * ring buffer.
	 */
	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	/* check if we cross pages */
	if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
		nr_pages = 2;

	offset = addr & (PAGE_SIZE - 1);
	addr &= PAGE_MASK;

	ret = get_user_pages_fast(addr, nr_pages, 0, pages);
	if (ret < nr_pages) {
		/* Partial pin: release what we did get and bail */
		while (--ret >= 0)
			put_page(pages[ret]);
		written = -EFAULT;
		goto out;
	}

	/* Map the pinned page(s) so we can memcpy from them */
	for (i = 0; i < nr_pages; i++)
		map_page[i] = kmap_atomic(pages[i]);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt + 2; /* possible \n added */
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  irq_flags, preempt_count());
	if (!event) {
		/* Ring buffer disabled, return as if not open for write */
		written = -EBADF;
		goto out_unlock;
	}

	entry = ring_buffer_event_data(event);
	entry->ip = _THIS_IP_;

	/* Copy across the page boundary when the user data spans two pages */
	if (nr_pages == 2) {
		len = PAGE_SIZE - offset;
		memcpy(&entry->buf, map_page[0] + offset, len);
		memcpy(&entry->buf[len], map_page[1], cnt - len);
	} else
		memcpy(&entry->buf, map_page[0] + offset, cnt);

	/*
	 * Terminate the record with "\n\0" (or just "\0" if the user
	 * already supplied a trailing newline).
	 * NOTE(review): a zero-length write would index entry->buf[-1]
	 * here — confirm cnt > 0 is guaranteed by the VFS caller.
	 */
	if (entry->buf[cnt - 1] != '\n') {
		entry->buf[cnt] = '\n';
		entry->buf[cnt + 1] = '\0';
	} else
		entry->buf[cnt] = '\0';

	__buffer_unlock_commit(buffer, event);

	written = cnt;

	*fpos += written;

	/* Unmap and unpin the user pages in all cases */
 out_unlock:
	for (i = 0; i < nr_pages; i++){
		kunmap_atomic(map_page[i]);
		put_page(pages[i]);
	}
 out:
	return written;
}
4926
Li Zefan13f16d22009-12-08 11:16:11 +08004927static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08004928{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004929 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08004930 int i;
4931
4932 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08004933 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08004934 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004935 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
4936 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08004937 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08004938
Li Zefan13f16d22009-12-08 11:16:11 +08004939 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08004940}
4941
/*
 * Switch the trace clock of @tr to the clock named @clockstr.
 * Returns 0 on success, -EINVAL if the name matches no known clock.
 * The affected buffers are reset because timestamps taken with the old
 * clock are not comparable with the new one.
 */
static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
{
	int i;

	/* Look the name up in the table of available clocks */
	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
		if (strcmp(trace_clocks[i].name, clockstr) == 0)
			break;
	}
	if (i == ARRAY_SIZE(trace_clocks))
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	tr->clock_id = i;

	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);

	/*
	 * New clock may not be consistent with the previous clock.
	 * Reset the buffer so that it doesn't have incomparable timestamps.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Keep the snapshot (max) buffer on the same clock as the live one */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
	tracing_reset_online_cpus(&tr->max_buffer);
#endif

	mutex_unlock(&trace_types_lock);

	return 0;
}
4975
4976static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
4977 size_t cnt, loff_t *fpos)
4978{
4979 struct seq_file *m = filp->private_data;
4980 struct trace_array *tr = m->private;
4981 char buf[64];
4982 const char *clockstr;
4983 int ret;
4984
4985 if (cnt >= sizeof(buf))
4986 return -EINVAL;
4987
4988 if (copy_from_user(&buf, ubuf, cnt))
4989 return -EFAULT;
4990
4991 buf[cnt] = 0;
4992
4993 clockstr = strstrip(buf);
4994
4995 ret = tracing_set_clock(tr, clockstr);
4996 if (ret)
4997 return ret;
4998
Zhaolei5079f322009-08-25 16:12:56 +08004999 *fpos += cnt;
5000
5001 return cnt;
5002}
5003
Li Zefan13f16d22009-12-08 11:16:11 +08005004static int tracing_clock_open(struct inode *inode, struct file *file)
5005{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005006 struct trace_array *tr = inode->i_private;
5007 int ret;
5008
Li Zefan13f16d22009-12-08 11:16:11 +08005009 if (tracing_disabled)
5010 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005011
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005012 if (trace_array_get(tr))
5013 return -ENODEV;
5014
5015 ret = single_open(file, tracing_clock_show, inode->i_private);
5016 if (ret < 0)
5017 trace_array_put(tr);
5018
5019 return ret;
Li Zefan13f16d22009-12-08 11:16:11 +08005020}
5021
/*
 * Per-open-file state for the raw per-cpu buffer files
 * (tracing_buffers_* handlers and the raw snapshot file).
 */
struct ftrace_buffer_info {
	struct trace_iterator	iter;	/* which array/buffer/cpu to read */
	void			*spare;	/* page filled by ring_buffer_read_page() */
	unsigned int	read;	/* bytes of the spare page already returned */
};
5027
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005028#ifdef CONFIG_TRACER_SNAPSHOT
/*
 * Open handler for the snapshot file.  A readable open builds a full
 * iterator via __tracing_open(); a write-only open only needs a stub
 * seq_file whose ->private carries a minimal iterator pointing at the
 * max (snapshot) buffer.  Holds a trace_array reference on success.
 */
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		/* "true" selects the snapshot buffer for iteration */
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		ret = -ENOMEM;
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
			goto out;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
			goto out;
		}
		ret = 0;

		iter->tr = tr;
		iter->trace_buffer = &tr->max_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		m->private = iter;
		file->private_data = m;
	}
out:
	/* On any failure, give back the reference taken above */
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
5068
/*
 * Write handler for the snapshot file.  The numeric value written
 * selects the action:
 *   0 - free the snapshot buffer (all-cpus file only),
 *   1 - allocate the snapshot buffer if needed and swap it with the
 *       live buffer (per-cpu swap only if the ring buffer supports it),
 *   * - clear the snapshot buffer's contents.
 */
static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	/* A tracer that uses the max buffer itself owns the snapshot */
	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	switch (val) {
	case 0:
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (!tr->allocated_snapshot) {
			ret = alloc_snapshot(tr);
			if (ret < 0)
				break;
		}
		/* Interrupts stay off across the swap with the live buffer */
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id());
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		/* Any other value just clears the snapshot contents */
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

	/* Only consume the input when the action succeeded */
	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005142
5143static int tracing_snapshot_release(struct inode *inode, struct file *file)
5144{
5145 struct seq_file *m = file->private_data;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005146 int ret;
5147
5148 ret = tracing_release(inode, file);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005149
5150 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005151 return ret;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005152
5153 /* If write only, the seq_file is just a stub */
5154 if (m)
5155 kfree(m->private);
5156 kfree(m);
5157
5158 return 0;
5159}
5160
/*
 * Forward declarations: snapshot_raw_open() below reuses the raw buffer
 * file handlers that are defined later in this file.
 */
static int tracing_buffers_open(struct inode *inode, struct file *filp);
static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
				    size_t count, loff_t *ppos);
static int tracing_buffers_release(struct inode *inode, struct file *file);
static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5167
5168static int snapshot_raw_open(struct inode *inode, struct file *filp)
5169{
5170 struct ftrace_buffer_info *info;
5171 int ret;
5172
5173 ret = tracing_buffers_open(inode, filp);
5174 if (ret < 0)
5175 return ret;
5176
5177 info = filp->private_data;
5178
5179 if (info->iter.trace->use_max_tr) {
5180 tracing_buffers_release(inode, filp);
5181 return -EBUSY;
5182 }
5183
5184 info->iter.snapshot = true;
5185 info->iter.trace_buffer = &info->iter.tr->max_buffer;
5186
5187 return ret;
5188}
5189
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005190#endif /* CONFIG_TRACER_SNAPSHOT */
5191
5192
/* Latency threshold control file */
static const struct file_operations tracing_thresh_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_thresh_read,
	.write		= tracing_thresh_write,
	.llseek		= generic_file_llseek,
};

/* Maximum recorded latency file */
static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};

/* Tracer selection file */
static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};

/* Human-readable streaming trace output (pipe) */
static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};

/* Per-cpu (or all-cpus) buffer size in KB */
static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

/* Read-only sum of all per-cpu buffer sizes */
static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

/* Frees the ring buffers when the file is released */
static const struct file_operations tracing_free_buffer_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};

/* Userspace trace-marker injection */
static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

/* Trace clock selection (seq_file based) */
static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_clock_write,
};
5258
#ifdef CONFIG_TRACER_SNAPSHOT
/* Snapshot control/read file (seq_file based) */
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};

/* Raw binary view of the snapshot buffer; reuses the raw buffer handlers */
static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */
5277
/*
 * Open handler for the raw per-cpu buffer file: allocate the per-file
 * ftrace_buffer_info, point its iterator at this array's live buffer
 * and the cpu encoded in the inode, and pin the trace array.
 */
static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct ftrace_buffer_info *info;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	/* Iterator setup is done under trace_types_lock */
	mutex_lock(&trace_types_lock);

	info->iter.tr = tr;
	info->iter.cpu_file = tracing_get_cpu(inode);
	info->iter.trace = tr->current_trace;
	info->iter.trace_buffer = &tr->trace_buffer;
	info->spare = NULL;
	/* Force reading ring buffer for first read */
	info->read = (unsigned int)-1;

	filp->private_data = info;

	mutex_unlock(&trace_types_lock);

	ret = nonseekable_open(inode, filp);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
5316
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005317static unsigned int
5318tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5319{
5320 struct ftrace_buffer_info *info = filp->private_data;
5321 struct trace_iterator *iter = &info->iter;
5322
5323 return trace_poll(iter, filp, poll_table);
5324}
5325
/*
 * Read handler for the raw per-cpu buffer file.  A whole ring buffer
 * page is pulled into the "spare" page with ring_buffer_read_page(),
 * then handed to userspace in as many reads as needed (info->read
 * tracks how much of the spare page has been consumed).  Blocks while
 * the buffer is empty unless the file was opened O_NONBLOCK.
 */
static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;
	ssize_t ret;
	ssize_t size;

	if (!count)
		return 0;

	mutex_lock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	/* The snapshot file cannot be read while the tracer owns the max buffer */
	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
		size = -EBUSY;
		goto out_unlock;
	}
#endif

	/* Lazily allocate the spare page on first read */
	if (!info->spare)
		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
							  iter->cpu_file);
	size = -ENOMEM;
	if (!info->spare)
		goto out_unlock;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

 again:
	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
				    &info->spare,
				    count,
				    iter->cpu_file, 0);
	trace_access_unlock(iter->cpu_file);

	if (ret < 0) {
		if (trace_empty(iter)) {
			if ((filp->f_flags & O_NONBLOCK)) {
				size = -EAGAIN;
				goto out_unlock;
			}
			/* Drop the mutex across the blocking wait, then retake it */
			mutex_unlock(&trace_types_lock);
			ret = wait_on_pipe(iter, false);
			mutex_lock(&trace_types_lock);
			if (ret) {
				size = ret;
				goto out_unlock;
			}
			goto again;
		}
		size = 0;
		goto out_unlock;
	}

	/* A fresh page was read: restart consumption from its beginning */
	info->read = 0;
 read:
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size) {
		/* Nothing was copied at all */
		size = -EFAULT;
		goto out_unlock;
	}
	/* copy_to_user() returns the number of bytes NOT copied */
	size -= ret;

	*ppos += size;
	info->read += size;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return size;
}
5406
/*
 * Release hook for the raw buffer file: return the spare page to the
 * ring buffer, free the per-file state and drop the array reference
 * (the __ variant is used because trace_types_lock is already held).
 */
static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	__trace_array_put(iter->tr);

	if (info->spare)
		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
	kfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}
5424
/* Reference-counted handle to a ring buffer page handed to a pipe */
struct buffer_ref {
	struct ring_buffer	*buffer;	/* buffer the page came from */
	void			*page;		/* returned via ring_buffer_free_read_page() */
	int			ref;		/* freed when this drops to zero */
};
5430
5431static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5432 struct pipe_buffer *buf)
5433{
5434 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5435
5436 if (--ref->ref)
5437 return;
5438
5439 ring_buffer_free_read_page(ref->buffer, ref->page);
5440 kfree(ref);
5441 buf->private = 0;
5442}
5443
/* Take an extra reference when a pipe buffer is duplicated */
static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	/*
	 * NOTE(review): no overflow check on the count here; later kernels
	 * hardened pipe_buf get paths against refcount overflow — confirm
	 * whether that hardening is needed on this branch.
	 */
	ref->ref++;
}
5451
/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge		= 0,			/* pages are whole ring buffer pages; never merge */
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,
};
5460
/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we error'ed out in filling the pipe.
 * Mirrors buffer_pipe_buf_release() for refs that never made it into
 * a pipe buffer.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	spd->partial[i].private = 0;
}
5477
5478static ssize_t
5479tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5480 struct pipe_inode_info *pipe, size_t len,
5481 unsigned int flags)
5482{
5483 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005484 struct trace_iterator *iter = &info->iter;
Jens Axboe35f3d142010-05-20 10:43:18 +02005485 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5486 struct page *pages_def[PIPE_DEF_BUFFERS];
Steven Rostedt2cadf912008-12-01 22:20:19 -05005487 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02005488 .pages = pages_def,
5489 .partial = partial_def,
Eric Dumazet047fe362012-06-12 15:24:40 +02005490 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005491 .flags = flags,
5492 .ops = &buffer_pipe_buf_ops,
5493 .spd_release = buffer_spd_release,
5494 };
5495 struct buffer_ref *ref;
Steven Rostedt93459c62009-04-29 00:23:13 -04005496 int entries, size, i;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005497 ssize_t ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005498
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005499 mutex_lock(&trace_types_lock);
5500
5501#ifdef CONFIG_TRACER_MAX_TRACE
5502 if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5503 ret = -EBUSY;
5504 goto out;
5505 }
5506#endif
5507
5508 if (splice_grow_spd(pipe, &spd)) {
5509 ret = -ENOMEM;
5510 goto out;
5511 }
Jens Axboe35f3d142010-05-20 10:43:18 +02005512
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005513 if (*ppos & (PAGE_SIZE - 1)) {
Jens Axboe35f3d142010-05-20 10:43:18 +02005514 ret = -EINVAL;
5515 goto out;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005516 }
5517
5518 if (len & (PAGE_SIZE - 1)) {
Jens Axboe35f3d142010-05-20 10:43:18 +02005519 if (len < PAGE_SIZE) {
5520 ret = -EINVAL;
5521 goto out;
5522 }
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005523 len &= PAGE_MASK;
5524 }
5525
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005526 again:
5527 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005528 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt93459c62009-04-29 00:23:13 -04005529
Al Viroa786c062014-04-11 12:01:03 -04005530 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005531 struct page *page;
5532 int r;
5533
5534 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
5535 if (!ref)
5536 break;
5537
Steven Rostedt7267fa62009-04-29 00:16:21 -04005538 ref->ref = 1;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005539 ref->buffer = iter->trace_buffer->buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005540 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005541 if (!ref->page) {
5542 kfree(ref);
5543 break;
5544 }
5545
5546 r = ring_buffer_read_page(ref->buffer, &ref->page,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005547 len, iter->cpu_file, 1);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005548 if (r < 0) {
Vaibhav Nagarnaik7ea59062011-05-03 17:56:42 -07005549 ring_buffer_free_read_page(ref->buffer, ref->page);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005550 kfree(ref);
5551 break;
5552 }
5553
5554 /*
5555 * zero out any left over data, this is going to
5556 * user land.
5557 */
5558 size = ring_buffer_page_len(ref->page);
5559 if (size < PAGE_SIZE)
5560 memset(ref->page + size, 0, PAGE_SIZE - size);
5561
5562 page = virt_to_page(ref->page);
5563
5564 spd.pages[i] = page;
5565 spd.partial[i].len = PAGE_SIZE;
5566 spd.partial[i].offset = 0;
5567 spd.partial[i].private = (unsigned long)ref;
5568 spd.nr_pages++;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005569 *ppos += PAGE_SIZE;
Steven Rostedt93459c62009-04-29 00:23:13 -04005570
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005571 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005572 }
5573
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005574 trace_access_unlock(iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005575 spd.nr_pages = i;
5576
5577 /* did we read anything? */
5578 if (!spd.nr_pages) {
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005579 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005580 ret = -EAGAIN;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005581 goto out;
5582 }
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005583 mutex_unlock(&trace_types_lock);
Rabin Vincente30f53a2014-11-10 19:46:34 +01005584 ret = wait_on_pipe(iter, true);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005585 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005586 if (ret)
5587 goto out;
Rabin Vincente30f53a2014-11-10 19:46:34 +01005588
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005589 goto again;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005590 }
5591
5592 ret = splice_to_pipe(pipe, &spd);
Eric Dumazet047fe362012-06-12 15:24:40 +02005593 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02005594out:
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005595 mutex_unlock(&trace_types_lock);
5596
Steven Rostedt2cadf912008-12-01 22:20:19 -05005597 return ret;
5598}
5599
/* File operations for the per-cpu "trace_pipe_raw" debugfs file. */
static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};
5608
/*
 * Read handler for the per-cpu "stats" file: format the ring buffer
 * counters (entries, overruns, bytes, timestamps, dropped/read events)
 * for one CPU into a temporary trace_seq and copy it to user space.
 */
static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	/* trace_seq is too big for the stack; allocate it */
	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global for trace_clock: print as sec.usec */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
								t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock: raw values */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				ring_buffer_time_stamp(trace_buf->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);

	kfree(s);

	return count;
}
5671
/* File operations for the per-cpu "stats" debugfs file. */
static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};
5678
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005679#ifdef CONFIG_DYNAMIC_FTRACE
5680
/*
 * Weak default: architectures may override to append arch-specific
 * dynamic-ftrace info to the dyn_ftrace_total_info output.  Returns the
 * number of bytes written into @buf (0 here: nothing to add).
 */
int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}
5685
/*
 * Read handler for "dyn_ftrace_total_info": report the counter pointed
 * to by file->private_data plus any arch-specific details.  The static
 * buffer is shared between readers and guarded by dyn_info_mutex.
 */
static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
	int r;

	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	/* leave room for the trailing newline below */
	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
	buf[r++] = '\n';

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);

	return r;
}
5709
/* File operations for "dyn_ftrace_total_info". */
static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005715#endif /* CONFIG_DYNAMIC_FTRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005716
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005717#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
/* Probe callback for "func:snapshot": take a snapshot on every hit. */
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	tracing_snapshot();
}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005723
/*
 * Probe callback for "func:snapshot:count": take a snapshot until the
 * count (stored directly in the callback data slot) is exhausted.
 * A count of -1 means unlimited.
 */
static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	unsigned long *count = (long *)data;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_snapshot();
}
5737
/*
 * ->print handler for the snapshot probes: shown when listing
 * set_ftrace_filter, as "func:snapshot:unlimited" or
 * "func:snapshot:count=N".  Uses seq_puts() for the constant strings
 * (no format parsing needed) and seq_printf() only where formatting
 * is required.
 */
static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	seq_puts(m, "snapshot");

	if (count == -1)
		seq_puts(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}
5755
/* Probe ops for "func:snapshot" (no count). */
static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,
};

/* Probe ops for "func:snapshot:count". */
static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,
};
5765
5766static int
5767ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
5768 char *glob, char *cmd, char *param, int enable)
5769{
5770 struct ftrace_probe_ops *ops;
5771 void *count = (void *)-1;
5772 char *number;
5773 int ret;
5774
5775 /* hash funcs only work with set_ftrace_filter */
5776 if (!enable)
5777 return -EINVAL;
5778
5779 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
5780
5781 if (glob[0] == '!') {
5782 unregister_ftrace_function_probe_func(glob+1, ops);
5783 return 0;
5784 }
5785
5786 if (!param)
5787 goto out_reg;
5788
5789 number = strsep(&param, ":");
5790
5791 if (!strlen(number))
5792 goto out_reg;
5793
5794 /*
5795 * We use the callback data field (which is a pointer)
5796 * as our counter.
5797 */
5798 ret = kstrtoul(number, 0, (unsigned long *)&count);
5799 if (ret)
5800 return ret;
5801
5802 out_reg:
5803 ret = register_ftrace_function_probe(glob, ops, count);
5804
5805 if (ret >= 0)
5806 alloc_snapshot(&global_trace);
5807
5808 return ret < 0 ? ret : 0;
5809}
5810
/* The "snapshot" command usable in set_ftrace_filter. */
static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name			= "snapshot",
	.func			= ftrace_trace_snapshot_callback,
};
5815
/* Register the "snapshot" ftrace command at boot. */
static __init int register_snapshot_cmd(void)
{
	return register_ftrace_command(&ftrace_snapshot_cmd);
}
#else
/* Snapshot or dynamic ftrace not configured: nothing to register. */
static inline __init int register_snapshot_cmd(void) { return 0; }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005822#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005823
/*
 * Return (creating on first use) the debugfs directory of a trace
 * array.  Only the global array lives directly under "tracing"; may
 * return NULL if debugfs is unavailable or creation failed.
 */
struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
{
	if (tr->dir)
		return tr->dir;

	if (!debugfs_initialized())
		return NULL;

	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		tr->dir = debugfs_create_dir("tracing", NULL);

	if (!tr->dir)
		pr_warn_once("Could not create debugfs directory 'tracing'\n");

	return tr->dir;
}
5840
/* Convenience wrapper: the debugfs directory of the global trace array. */
struct dentry *tracing_init_dentry(void)
{
	return tracing_init_dentry_tr(&global_trace);
}
5845
/*
 * Return (creating on first use) the "per_cpu" debugfs directory of a
 * trace array; NULL on failure.
 */
static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
	struct dentry *d_tracer;

	if (tr->percpu_dir)
		return tr->percpu_dir;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);

	WARN_ONCE(!tr->percpu_dir,
		  "Could not create debugfs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;
}
5864
/*
 * Like trace_create_file() but stashes the cpu number (biased by one so
 * that cpu 0 is distinguishable from "not set") in the inode's i_cdev
 * for tracing_get_cpu() to recover.
 */
static struct dentry *
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
		      void *data, long cpu, const struct file_operations *fops)
{
	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);

	if (ret) /* See tracing_get_cpu() */
		ret->d_inode->i_cdev = (void *)(cpu + 1);
	return ret;
}
5875
/*
 * Populate the per_cpu/cpuN debugfs directory of a trace array with the
 * per-cpu views of the trace files.  Failures are logged and otherwise
 * ignored (the directory simply stays empty).
 */
static void
tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
				tr, cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_cpu_file("trace", 0644, d_cpu,
				tr, cpu, &tracing_fops);

	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
				tr, cpu, &tracing_buffers_fops);

	trace_create_cpu_file("stats", 0444, d_cpu,
				tr, cpu, &tracing_stats_fops);

	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
				tr, cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_cpu_file("snapshot", 0644, d_cpu,
				tr, cpu, &snapshot_fops);

	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
				tr, cpu, &snapshot_raw_fops);
#endif
}
5918
Steven Rostedt60a11772008-05-12 21:20:44 +02005919#ifdef CONFIG_FTRACE_SELFTEST
5920/* Let selftest have access to static functions in this file */
5921#include "trace_selftest.c"
5922#endif
5923
/* One tracer-specific option file under the "options" directory. */
struct trace_option_dentry {
	struct tracer_opt		*opt;	/* the option this file toggles */
	struct tracer_flags		*flags;	/* flag set the option belongs to */
	struct trace_array		*tr;	/* owning trace array */
	struct dentry			*entry;	/* the debugfs file itself */
};
5930
5931static ssize_t
5932trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
5933 loff_t *ppos)
5934{
5935 struct trace_option_dentry *topt = filp->private_data;
5936 char *buf;
5937
5938 if (topt->flags->val & topt->opt->bit)
5939 buf = "1\n";
5940 else
5941 buf = "0\n";
5942
5943 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5944}
5945
/*
 * Write handler for a tracer-specific option file: accept "0" or "1"
 * and flip the option via the tracer's set_flag callback when the value
 * actually changes.  trace_types_lock guards against tracer switches.
 */
static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		/* last argument is "neg": clearing when val is 0 */
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}
5974
5975
/* File operations for tracer-specific option files. */
static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
	.llseek	= generic_file_llseek,
};
5982
Steven Rostedta8259072009-02-26 22:19:12 -05005983static ssize_t
5984trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
5985 loff_t *ppos)
5986{
5987 long index = (long)filp->private_data;
5988 char *buf;
5989
5990 if (trace_flags & (1 << index))
5991 buf = "1\n";
5992 else
5993 buf = "0\n";
5994
5995 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5996}
5997
/*
 * Write handler for a core option file: accept "0" or "1" and set or
 * clear the trace_flags bit indexed by private_data on the global trace
 * array, under trace_types_lock.
 */
static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	struct trace_array *tr = &global_trace;
	long index = (long)filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}
6025
/* File operations for core option files. */
static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek = generic_file_llseek,
};
6032
/*
 * debugfs_create_file() wrapper that logs a warning on failure; returns
 * the new dentry or NULL.
 */
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = debugfs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warning("Could not create debugfs '%s' entry\n", name);

	return ret;
}
6047
6048
/*
 * Return (creating on first use) the "options" debugfs directory of a
 * trace array; NULL on failure.
 */
static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->options = debugfs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warning("Could not create debugfs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}
6068
/*
 * Create one tracer-specific option file under "options", filling in
 * @topt so the read/write handlers can find the flag word and bit.
 */
static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
				    &trace_options_fops);

}
6089
/*
 * Create an option file for every option a tracer declares.  Returns a
 * kcalloc'ed, zero-terminated array of trace_option_dentry (freed by
 * destroy_trace_option_files()), or NULL if the tracer has no options
 * or allocation failed.
 */
static struct trace_option_dentry *
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;

	if (!tracer)
		return NULL;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return NULL;

	opts = flags->opts;

	/* count the NULL-name-terminated option list */
	for (cnt = 0; opts[cnt].name; cnt++)
		;

	/* cnt + 1 leaves a zeroed terminator entry */
	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return NULL;

	for (cnt = 0; opts[cnt].name; cnt++)
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);

	return topts;
}
6121
/*
 * Remove the option files created by create_trace_option_files() and
 * free the array.  The loop stops at the zeroed terminator entry.
 */
static void
destroy_trace_option_files(struct trace_option_dentry *topts)
{
	int cnt;

	if (!topts)
		return;

	for (cnt = 0; topts[cnt].opt; cnt++)
		debugfs_remove(topts[cnt].entry);

	kfree(topts);
}
6135
/*
 * Create one core option file under "options"; the trace_flags bit
 * index is smuggled through the file's private_data.
 */
static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options, (void *)index,
				    &trace_options_core_fops);
}
6149
/*
 * Create a core option file for every entry of the NULL-terminated
 * trace_options[] name table.
 */
static __init void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++)
		create_trace_option_core_file(tr, trace_options[i], i);
}
6162
Steven Rostedt499e5472012-02-22 15:50:28 -05006163static ssize_t
6164rb_simple_read(struct file *filp, char __user *ubuf,
6165 size_t cnt, loff_t *ppos)
6166{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04006167 struct trace_array *tr = filp->private_data;
Steven Rostedt499e5472012-02-22 15:50:28 -05006168 char buf[64];
6169 int r;
6170
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04006171 r = tracer_tracing_is_on(tr);
Steven Rostedt499e5472012-02-22 15:50:28 -05006172 r = sprintf(buf, "%d\n", r);
6173
6174 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6175}
6176
6177static ssize_t
6178rb_simple_write(struct file *filp, const char __user *ubuf,
6179 size_t cnt, loff_t *ppos)
6180{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04006181 struct trace_array *tr = filp->private_data;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006182 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt499e5472012-02-22 15:50:28 -05006183 unsigned long val;
6184 int ret;
6185
6186 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6187 if (ret)
6188 return ret;
6189
6190 if (buffer) {
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05006191 mutex_lock(&trace_types_lock);
6192 if (val) {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04006193 tracer_tracing_on(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006194 if (tr->current_trace->start)
6195 tr->current_trace->start(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05006196 } else {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04006197 tracer_tracing_off(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006198 if (tr->current_trace->stop)
6199 tr->current_trace->stop(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05006200 }
6201 mutex_unlock(&trace_types_lock);
Steven Rostedt499e5472012-02-22 15:50:28 -05006202 }
6203
6204 (*ppos)++;
6205
6206 return cnt;
6207}
6208
/* File operations for the per-instance "tracing_on" control file. */
static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic_tr,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};
6216
/*
 * Root of the debugfs "instances" directory; new trace instances are
 * created/removed by mkdir/rmdir inside it (see instance_mkdir below).
 */
struct dentry *trace_instance_dir;

static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
6221
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006222static int
6223allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
Steven Rostedt277ba042012-08-03 16:10:49 -04006224{
6225 enum ring_buffer_flags rb_flags;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006226
6227 rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
6228
Steven Rostedt (Red Hat)dced3412014-01-14 10:19:46 -05006229 buf->tr = tr;
6230
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006231 buf->buffer = ring_buffer_alloc(size, rb_flags);
6232 if (!buf->buffer)
6233 return -ENOMEM;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006234
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006235 buf->data = alloc_percpu(struct trace_array_cpu);
6236 if (!buf->data) {
6237 ring_buffer_free(buf->buffer);
6238 return -ENOMEM;
6239 }
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006240
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006241 /* Allocate the first page for all buffers */
6242 set_buffer_entries(&tr->trace_buffer,
6243 ring_buffer_size(tr->trace_buffer.buffer, 0));
6244
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006245 return 0;
6246}
6247
6248static int allocate_trace_buffers(struct trace_array *tr, int size)
6249{
6250 int ret;
6251
6252 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
6253 if (ret)
6254 return ret;
6255
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006256#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006257 ret = allocate_trace_buffer(tr, &tr->max_buffer,
6258 allocate_snapshot ? size : 1);
6259 if (WARN_ON(ret)) {
6260 ring_buffer_free(tr->trace_buffer.buffer);
6261 free_percpu(tr->trace_buffer.data);
6262 return -ENOMEM;
6263 }
6264 tr->allocated_snapshot = allocate_snapshot;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006265
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006266 /*
6267 * Only the top level trace array gets its snapshot allocated
6268 * from the kernel command line.
6269 */
6270 allocate_snapshot = false;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006271#endif
6272 return 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006273}
6274
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04006275static void free_trace_buffer(struct trace_buffer *buf)
6276{
6277 if (buf->buffer) {
6278 ring_buffer_free(buf->buffer);
6279 buf->buffer = NULL;
6280 free_percpu(buf->data);
6281 buf->data = NULL;
6282 }
6283}
6284
/* Free all buffers owned by a trace array (main and, if built, max). */
static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}
6296
/*
 * Create a new named trace instance (its own trace_array, ring buffers,
 * and debugfs directory under "instances").
 *
 * Runs entirely under trace_types_lock.  Returns 0 on success,
 * -EEXIST if an instance with this name already exists, or -ENOMEM
 * (also used for debugfs/event setup failures) on allocation errors.
 * On any failure after the kzalloc, the goto chain frees everything
 * allocated so far.
 */
static int new_instance_create(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	/* Reject duplicate instance names. */
	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	/* New instances trace all CPUs by default. */
	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	/* Start with no tracer attached. */
	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = debugfs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		debugfs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	init_tracer_debugfs(tr, tr->dir);

	/* Instance is fully set up; make it visible. */
	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;

}
6366
/*
 * Delete the named trace instance: unlink it from the global list and
 * tear down its tracer, events, function files, debugfs tree and
 * buffers, all under trace_types_lock.
 *
 * Returns 0 on success, -ENODEV if no instance has this name, or
 * -EBUSY while the instance still has open references.
 */
static int instance_delete(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	/* Someone still holds a reference (e.g. an open file). */
	ret = -EBUSY;
	if (tr->ref)
		goto out_unlock;

	list_del(&tr->list);

	/* Detach the tracer before dismantling the instance. */
	tracing_set_nop(tr);
	event_trace_del_tracer(tr);
	ftrace_destroy_function_files(tr);
	debugfs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}
6407
/*
 * mkdir handler for the "instances" directory: creating a directory
 * there creates a whole new trace instance with that name.
 */
static int instance_mkdir (struct inode *inode, struct dentry *dentry, umode_t mode)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/*
	 * The inode mutex is locked, but debugfs_create_dir() will also
	 * take the mutex. As the instances directory can not be destroyed
	 * or changed in any other way, it is safe to unlock it, and
	 * let the dentry try. If two users try to make the same dir at
	 * the same time, then the new_instance_create() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = new_instance_create(dentry->d_iname);

	mutex_lock(&inode->i_mutex);

	return ret;
}
6434
/*
 * rmdir handler for the "instances" directory: removing a directory
 * there deletes the corresponding trace instance.
 */
static int instance_rmdir(struct inode *inode, struct dentry *dentry)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/* The caller did a dget() on dentry */
	mutex_unlock(&dentry->d_inode->i_mutex);

	/*
	 * The inode mutex is locked, but debugfs_remove_recursive() will
	 * also take the mutex. As the instances directory can not be
	 * destroyed or changed in any other way, it is safe to unlock it,
	 * and let the dentry try. If two users try to remove the same dir
	 * at the same time, then the instance_delete() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = instance_delete(dentry->d_iname);

	/* Retake both locks in the order the VFS expects. */
	mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
	mutex_lock(&dentry->d_inode->i_mutex);

	return ret;
}
6465
/* Inode ops installed on "instances" so mkdir/rmdir manage instances. */
static const struct inode_operations instance_dir_inode_operations = {
	.lookup		= simple_lookup,
	.mkdir		= instance_mkdir,
	.rmdir		= instance_rmdir,
};
6471
6472static __init void create_trace_instances(struct dentry *d_tracer)
6473{
6474 trace_instance_dir = debugfs_create_dir("instances", d_tracer);
6475 if (WARN_ON(!trace_instance_dir))
6476 return;
6477
6478 /* Hijack the dir inode operations, to allow mkdir */
6479 trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
6480}
6481
/*
 * Create the standard set of debugfs control files for one trace array
 * (used for both the top-level tracer and each instance), plus the
 * per-cpu subdirectories.
 */
static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			&tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	/* One directory per CPU with its own trace/trace_pipe/stats files. */
	for_each_tracing_cpu(cpu)
		tracing_init_debugfs_percpu(tr, cpu);

}
6540
/*
 * fs_initcall: build the top-level tracing debugfs tree — the global
 * trace array's files plus the global-only files (README, saved
 * cmdlines, options, instances).  Returns 0 even if debugfs is
 * unavailable, since tracing itself still works.
 */
static __init int tracer_init_debugfs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	init_tracer_debugfs(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			&global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, d_tracer,
			NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	create_trace_options_dir(&global_trace);

	return 0;
}
6576
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006577static int trace_panic_handler(struct notifier_block *this,
6578 unsigned long event, void *unused)
6579{
Steven Rostedt944ac422008-10-23 19:26:08 -04006580 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02006581 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006582 return NOTIFY_OK;
6583}
6584
/* Registered on the panic notifier chain at boot. */
static struct notifier_block trace_panic_notifier = {
	.notifier_call  = trace_panic_handler,
	.next           = NULL,
	.priority       = 150   /* priority: INT_MAX >= x >= 0 */
};
6590
6591static int trace_die_handler(struct notifier_block *self,
6592 unsigned long val,
6593 void *data)
6594{
6595 switch (val) {
6596 case DIE_OOPS:
Steven Rostedt944ac422008-10-23 19:26:08 -04006597 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02006598 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006599 break;
6600 default:
6601 break;
6602 }
6603 return NOTIFY_OK;
6604}
6605
/* Registered on the die notifier chain at boot. */
static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};
6610
/*
 * printk is set to max of 1024, we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006623
Jason Wessel955b61e2010-08-05 09:22:23 -05006624void
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006625trace_printk_seq(struct trace_seq *s)
6626{
6627 /* Probably should print a warning here. */
zhangwei(Jovi)bd6df182013-03-11 15:13:37 +08006628 if (s->len >= TRACE_MAX_PRINT)
6629 s->len = TRACE_MAX_PRINT;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006630
6631 /* should be zero ended, but we are paranoid. */
6632 s->buffer[s->len] = 0;
6633
6634 printk(KERN_TRACE "%s", s->buffer);
6635
Steven Rostedtf9520752009-03-02 14:04:40 -05006636 trace_seq_init(s);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006637}
6638
/*
 * Initialize a trace_iterator over the global (top-level) trace array,
 * covering all CPUs.  Used by ftrace_dump() and kdb to walk the buffer
 * without going through the file interface.
 */
void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	/* Give the tracer a chance to set up iterator state. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}
6657
/*
 * Dump the contents of the global trace buffer to the console.
 *
 * Called from panic/oops/sysrq paths, so it must be usable with
 * interrupts disabled and without taking sleeping locks.  Tracing is
 * turned off for the duration; per-cpu "disabled" counters keep new
 * events from being recorded while we drain the buffer.
 * @oops_dump_mode selects all CPUs, just the oopsing CPU, or nothing.
 */
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	/* Block new events on every CPU while we read. */
	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
	}

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("# MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all what we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		/* Dumping can take a while; keep the NMI watchdog quiet. */
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	trace_flags |= old_userobj;

	/* Re-allow recording on every CPU. */
	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01006767
/*
 * early_initcall: allocate the global trace buffers and bring up the
 * core tracing machinery (nop tracer, notifiers, boot-time clock and
 * option handling).  On any allocation failure, the goto chain unwinds
 * exactly what was set up so far and returns -ENOMEM (or the specific
 * error where one is produced).
 */
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;


	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* Used for event triggers */
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_free_cpumask;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}

	/* Honor a "traceoff" request made before the buffer existed. */
	if (global_trace.buffer_disabled)
		tracing_off();

	/* Apply a trace_clock= boot parameter, if one was given. */
	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warning("Trace clock %s not defined, going back to default\n",
				   trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	/* Apply trace_options= boot parameters, comma separated. */
	while (trace_boot_options) {
		char *option;

		option = strsep(&trace_boot_options, ",");
		trace_set_options(&global_trace, option);
	}

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
Steven Rostedtb2821ae2009-02-02 21:38:32 -05006870
/*
 * late_initcall: warn about, and drop, a tracer requested on the boot
 * command line that never registered.
 */
__init static int clear_boot_tracer(void)
{
	/*
	 * The default tracer at boot buffer is an init section.
	 * This function is called in lateinit. If we did not
	 * find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}
6889
/*
 * Buffers must exist very early; the debugfs files can wait for
 * filesystem init; the bootup-tracer check runs last so every builtin
 * tracer has had a chance to register.
 */
early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);