/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring buffer, such as trace_printk(), could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will be set to 0 if the initialization
 * of the tracer is successful. That is the only place that ever sets
 * it back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered the oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);
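
/*
 * Example (editor's sketch, not in the original source): selecting a
 * boot-up tracer from the kernel command line. The name must match a
 * tracer registered with register_tracer() later in this file:
 *
 *	ftrace=function_graph
 *
 * The name is stashed in bootup_tracer_buf; register_tracer() starts
 * the tracer as soon as a tracer with that name registers itself.
 */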

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning=", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);


unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}
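
/*
 * Worked example (editor's note): the "+ 500" makes the division round
 * to the nearest microsecond instead of truncating:
 *
 *	ns2usecs(1499) == 1	((1499 + 500) / 1000)
 *	ns2usecs(1500) == 2	((1500 + 500) / 1000)
 */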

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array	global_trace;

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}
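
/*
 * Example (editor's sketch, hypothetical caller): trace_array_get() and
 * trace_array_put() pair like any other refcount API. A file that takes
 * a reference on open must drop it on release:
 *
 *	static int example_open(struct inode *inode, struct file *filp)
 *	{
 *		struct trace_array *tr = inode->i_private;
 *
 *		if (trace_array_get(tr) < 0)
 *			return -ENODEV;
 *		filp->private_data = tr;
 *		return 0;
 *	}
 *
 * with a matching trace_array_put(tr) in the ->release() handler, so
 * the trace_array cannot be freed while the file is open.
 */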

int filter_check_discard(struct ftrace_event_file *file, void *rec,
			 struct ring_buffer *buffer,
			 struct ring_buffer_event *event)
{
	if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(file->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(filter_check_discard);

int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(call_filter_check_discard);

static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, a small buffer means not having
 * to wait for all that output. Anyway, this is configurable at
 * both boot time and run time.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * Serialize access to the ring buffer.
 *
 * The ring buffer serializes readers, but that is only low level
 * protection. The validity of the events (returned by, e.g.,
 * ring_buffer_peek()) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow another process
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the event producer.
 *   B) the page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different cpu
 * ring buffers concurrently.
 *
 * These primitives don't distinguish read-only from read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
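
/*
 * Example (editor's sketch): a reader of a single cpu's buffer wraps a
 * consuming read in the primitives above:
 *
 *	trace_access_lock(cpu);
 *	event = ring_buffer_consume(buffer, cpu, &ts, &lost_events);
 *	trace_access_unlock(cpu);
 *
 * while an operation that spans every CPU passes RING_BUFFER_ALL_CPUS,
 * taking all_cpu_access_lock for writing and thereby excluding all
 * per-cpu readers.
 */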

/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;

static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff) that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races where it gets disabled while we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(buffer, irq_flags, 4, pc);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);
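
/*
 * Example (editor's note): __trace_puts() is normally reached through
 * the trace_puts() macro (see linux/kernel.h), which computes the
 * length of a string literal at compile time:
 *
 *	trace_puts("entered fast path\n");
 *
 * This is cheaper than trace_printk() when no formatting is needed.
 */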

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip			= ip;
	entry->str			= str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(buffer, irq_flags, 4, pc);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it, because
	 * the max_tr ring buffer has some state (e.g. ring->clock) that
	 * we want to preserve.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */
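
/*
 * Example (editor's sketch, hypothetical caller): a debugging hook that
 * snapshots the live trace when it sees a bad state. The allocation is
 * done once from a sleepable context; the snapshot itself may then be
 * taken from almost anywhere (except NMI, as checked above):
 *
 *	if (tracing_alloc_snapshot() < 0)	(module init, may sleep)
 *		return -ENOMEM;
 *
 *	if (data->state == BAD_STATE)		(later, in the suspect path)
 *		tracing_snapshot();
 *
 * The captured buffer is then read from
 * /sys/kernel/debug/tracing/snapshot. "data->state == BAD_STATE" is a
 * stand-in for whatever condition the caller cares about.
 */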

static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff) that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races where it gets disabled while we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);
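
/*
 * Example (editor's note): memparse() accepts the usual K/M/G suffixes,
 * so these are equivalent ways to ask for a 1MB buffer at boot:
 *
 *	trace_buf_size=1048576
 *	trace_buf_size=1M
 *
 * The size can also be changed at run time through
 * /sys/kernel/debug/tracing/buffer_size_kb (in kilobytes, per cpu).
 */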

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	"record-cmd",
	"overwrite",
	"disable_on_free",
	"irq-info",
	"markers",
	"function-trace",
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,	"local",	1 },
	{ trace_clock_global,	"global",	1 },
	{ trace_clock_counter,	"counter",	0 },
	{ trace_clock_jiffies,	"uptime",	0 },
	{ trace_clock,		"perf",		1 },
	ARCH_TRACE_CLOCKS
};
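
/*
 * Example (editor's note): the names above are what user space writes
 * to select a clock:
 *
 *	echo global > /sys/kernel/debug/tracing/trace_clock
 *
 * "local" is fastest but not ordered across CPUs; "global" is ordered
 * across CPUs but slower; "counter" is a strict atomic count, useful
 * for checking event ordering rather than timing.
 */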

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
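
/*
 * Example (editor's sketch, hypothetical ->write() handler): the usual
 * calling pattern for the parser above, similar to what files like
 * set_ftrace_filter do (process_token() is a stand-in for real work):
 *
 *	static ssize_t example_write(struct file *filp,
 *				     const char __user *ubuf,
 *				     size_t cnt, loff_t *ppos)
 *	{
 *		struct trace_parser parser;
 *		ssize_t read;
 *
 *		if (trace_parser_get_init(&parser, PAGE_SIZE))
 *			return -ENOMEM;
 *
 *		read = trace_get_user(&parser, ubuf, cnt, ppos);
 *		if (read >= 0 && trace_parser_loaded(&parser))
 *			process_token(parser.buffer);
 *
 *		trace_parser_put(&parser);
 *		return read;
 *	}
 */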

static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->readpos, cnt);

	s->readpos += cnt;
	return cnt;
}

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */
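
/*
 * Example (editor's note): the latency tracers drive the functions
 * above. A rough sketch of how an irqsoff-style tracer reports a new
 * maximum (the real code in trace_irqsoff.c does more bookkeeping):
 *
 *	if (delta > tr->max_latency) {
 *		tr->max_latency = delta;
 *		update_max_tr_single(tr, current, cpu);
 *	}
 *
 * After the swap, the old live buffer becomes the max buffer, which is
 * what the snapshot/latency files read from.
 */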

static int wait_on_pipe(struct trace_iterator *iter)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break them. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}
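
/*
 * Example (editor's sketch, hypothetical tracer): roughly the minimum a
 * plugin needs in order to show up in available_tracers:
 *
 *	static int example_init(struct trace_array *tr)
 *	{
 *		return 0;
 *	}
 *
 *	static struct tracer example_tracer __read_mostly = {
 *		.name	= "example",
 *		.init	= example_init,
 *	};
 *
 *	static int __init example_register(void)
 *	{
 *		return register_tracer(&example_tracer);
 *	}
 *	core_initcall(example_register);
 *
 * See trace_nop.c for the smallest real tracer.
 */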

void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}

#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
struct saved_cmdlines_buffer {
	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
	unsigned *map_cmdline_to_pid;
	unsigned cmdline_num;
	int cmdline_idx;
	char *saved_cmdlines;
};
static struct saved_cmdlines_buffer *savedcmd;

/* temporarily disable recording */
Hannes Eder4fd27352009-02-10 19:44:12 +01001291static atomic_t trace_record_cmdline_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001292
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001293static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001294{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001295 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1296}
1297
1298static inline void set_cmdline(int idx, const char *cmdline)
1299{
1300 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1301}
1302
1303static int allocate_cmdlines_buffer(unsigned int val,
1304 struct saved_cmdlines_buffer *s)
1305{
1306 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1307 GFP_KERNEL);
1308 if (!s->map_cmdline_to_pid)
1309 return -ENOMEM;
1310
1311 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1312 if (!s->saved_cmdlines) {
1313 kfree(s->map_cmdline_to_pid);
1314 return -ENOMEM;
1315 }
1316
1317 s->cmdline_idx = 0;
1318 s->cmdline_num = val;
1319 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1320 sizeof(s->map_pid_to_cmdline));
1321 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1322 val * sizeof(*s->map_cmdline_to_pid));
1323
1324 return 0;
1325}
1326
1327static int trace_create_savedcmd(void)
1328{
1329 int ret;
1330
Namhyung Kima6af8fb2014-06-10 16:11:35 +09001331 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001332 if (!savedcmd)
1333 return -ENOMEM;
1334
1335 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1336 if (ret < 0) {
1337 kfree(savedcmd);
1338 savedcmd = NULL;
1339 return -ENOMEM;
1340 }
1341
1342 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001343}
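
/*
 * Editor's note (illustrative sketch, not in the original file): the
 * saved_cmdlines buffer is one flat char array carved into
 * TASK_COMM_LEN-byte slots, with two index arrays mapping pid -> slot
 * and slot -> pid.  A lookup reduces to the sketch below (lock
 * handling omitted; see trace_find_cmdline() further down).
 */
#if 0	/* illustrative only */
static const char *example_comm_for_pid(int pid)
{
	unsigned idx = savedcmd->map_pid_to_cmdline[pid];

	if (idx == NO_CMDLINE_MAP)
		return NULL;
	/* i.e. &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN] */
	return get_saved_cmdlines(idx);
}
#endif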
1344
Carsten Emdeb5130b12009-09-13 01:43:07 +02001345int is_tracing_stopped(void)
1346{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001347 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02001348}
1349
Steven Rostedt0f048702008-11-05 16:05:44 -05001350/**
1351 * tracing_start - quick start of the tracer
1352 *
1353 * If tracing is enabled but was stopped by tracing_stop,
1354 * this will start the tracer back up.
1355 */
1356void tracing_start(void)
1357{
1358 struct ring_buffer *buffer;
1359 unsigned long flags;
1360
1361 if (tracing_disabled)
1362 return;
1363
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001364 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1365 if (--global_trace.stop_count) {
1366 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05001367 /* Someone screwed up their debugging */
1368 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001369 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05001370 }
Steven Rostedt0f048702008-11-05 16:05:44 -05001371 goto out;
1372 }
1373
Steven Rostedta2f80712010-03-12 19:56:00 -05001374 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001375 arch_spin_lock(&global_trace.max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05001376
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001377 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001378 if (buffer)
1379 ring_buffer_record_enable(buffer);
1380
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001381#ifdef CONFIG_TRACER_MAX_TRACE
1382 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001383 if (buffer)
1384 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001385#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001386
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001387 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001388
Steven Rostedt0f048702008-11-05 16:05:44 -05001389 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001390 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1391}
1392
1393static void tracing_start_tr(struct trace_array *tr)
1394{
1395 struct ring_buffer *buffer;
1396 unsigned long flags;
1397
1398 if (tracing_disabled)
1399 return;
1400
1401 /* If global, we need to also start the max tracer */
1402 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1403 return tracing_start();
1404
1405 raw_spin_lock_irqsave(&tr->start_lock, flags);
1406
1407 if (--tr->stop_count) {
1408 if (tr->stop_count < 0) {
1409 /* Someone screwed up their debugging */
1410 WARN_ON_ONCE(1);
1411 tr->stop_count = 0;
1412 }
1413 goto out;
1414 }
1415
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001416 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001417 if (buffer)
1418 ring_buffer_record_enable(buffer);
1419
1420 out:
1421 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001422}
1423
1424/**
1425 * tracing_stop - quick stop of the tracer
1426 *
1427 * Lightweight way to stop tracing. Use in conjunction with
1428 * tracing_start.
1429 */
1430void tracing_stop(void)
1431{
1432 struct ring_buffer *buffer;
1433 unsigned long flags;
1434
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001435 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1436 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05001437 goto out;
1438
Steven Rostedta2f80712010-03-12 19:56:00 -05001439 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001440 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001441
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001442 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001443 if (buffer)
1444 ring_buffer_record_disable(buffer);
1445
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001446#ifdef CONFIG_TRACER_MAX_TRACE
1447 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001448 if (buffer)
1449 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001450#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001451
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001452 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001453
Steven Rostedt0f048702008-11-05 16:05:44 -05001454 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001455 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1456}
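
/*
 * Usage sketch (editor's illustration; struct my_dev and MY_BAD_STATE
 * are hypothetical): a debugging hook can freeze the ring buffers the
 * moment a bad state is seen, preserving the events that led up to it.
 * Since stop_count nests, each tracing_stop() must be matched by a
 * tracing_start() to resume recording.
 */
#if 0	/* illustrative only */
static void my_check_state(struct my_dev *dev)
{
	if (dev->state == MY_BAD_STATE) {
		trace_printk("bad state, freezing trace\n");
		tracing_stop();	/* resume later with tracing_start() */
	}
}
#endif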
1457
1458static void tracing_stop_tr(struct trace_array *tr)
1459{
1460 struct ring_buffer *buffer;
1461 unsigned long flags;
1462
1463 /* If global, we need to also stop the max tracer */
1464 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1465 return tracing_stop();
1466
1467 raw_spin_lock_irqsave(&tr->start_lock, flags);
1468 if (tr->stop_count++)
1469 goto out;
1470
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001471 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001472 if (buffer)
1473 ring_buffer_record_disable(buffer);
1474
1475 out:
1476 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001477}
1478
Ingo Molnare309b412008-05-12 21:20:51 +02001479void trace_stop_cmdline_recording(void);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001480
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001481static int trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001482{
Carsten Emdea635cf02009-03-18 09:00:41 +01001483 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001484
1485 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001486 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001487
1488 /*
1489 * It's not the end of the world if we don't get
1490 * the lock, but we also don't want to spin
1491 * nor do we want to disable interrupts,
1492 * so if we miss here, then better luck next time.
1493 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001494 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001495 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001496
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001497 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001498 if (idx == NO_CMDLINE_MAP) {
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001499 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001500
Carsten Emdea635cf02009-03-18 09:00:41 +01001501 /*
1502 * Check whether the cmdline buffer at idx has a pid
1503 * mapped. We are going to overwrite that entry so we
1504 * need to clear the map_pid_to_cmdline. Otherwise we
1505 * would read the new comm for the old pid.
1506 */
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001507 pid = savedcmd->map_cmdline_to_pid[idx];
Carsten Emdea635cf02009-03-18 09:00:41 +01001508 if (pid != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001509 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001510
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001511 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1512 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001513
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001514 savedcmd->cmdline_idx = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001515 }
1516
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001517 set_cmdline(idx, tsk->comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001518
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001519 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001520
1521 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001522}
1523
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001524static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001525{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001526 unsigned map;
1527
Steven Rostedt4ca53082009-03-16 19:20:15 -04001528 if (!pid) {
1529 strcpy(comm, "<idle>");
1530 return;
1531 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001532
Steven Rostedt74bf4072010-01-25 15:11:53 -05001533 if (WARN_ON_ONCE(pid < 0)) {
1534 strcpy(comm, "<XXX>");
1535 return;
1536 }
1537
Steven Rostedt4ca53082009-03-16 19:20:15 -04001538 if (pid > PID_MAX_DEFAULT) {
1539 strcpy(comm, "<...>");
1540 return;
1541 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001542
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001543 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001544 if (map != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001545 strcpy(comm, get_saved_cmdlines(map));
Thomas Gleixner50d88752009-03-18 08:58:44 +01001546 else
1547 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001548}
1549
1550void trace_find_cmdline(int pid, char comm[])
1551{
1552 preempt_disable();
1553 arch_spin_lock(&trace_cmdline_lock);
1554
1555 __trace_find_cmdline(pid, comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001556
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001557 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001558 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001559}
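
/*
 * Usage sketch (editor's illustration): output code typically resolves
 * a recorded pid back to a command name; the destination buffer must
 * hold at least TASK_COMM_LEN bytes.
 */
#if 0	/* illustrative only */
static void example_print_comm(struct trace_seq *s, int pid)
{
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(pid, comm);
	trace_seq_printf(s, "%16s-%-5d", comm, pid);
}
#endif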
1560
Ingo Molnare309b412008-05-12 21:20:51 +02001561void tracing_record_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001562{
Steven Rostedt0fb96562012-05-11 14:25:30 -04001563 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001564 return;
1565
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001566 if (!__this_cpu_read(trace_cmdline_save))
1567 return;
1568
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001569 if (trace_save_cmdline(tsk))
1570 __this_cpu_write(trace_cmdline_save, false);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001571}
1572
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03001573void
Steven Rostedt38697052008-10-01 13:14:09 -04001574tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1575 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001576{
1577 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001578
Steven Rostedt777e2082008-09-29 23:02:42 -04001579 entry->preempt_count = pc & 0xff;
1580 entry->pid = (tsk) ? tsk->pid : 0;
1581 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04001582#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04001583 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04001584#else
1585 TRACE_FLAG_IRQS_NOSUPPORT |
1586#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001587 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1588 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02001589 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1590 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001591}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02001592EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001593
Steven Rostedte77405a2009-09-02 14:17:06 -04001594struct ring_buffer_event *
1595trace_buffer_lock_reserve(struct ring_buffer *buffer,
1596 int type,
1597 unsigned long len,
1598 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001599{
1600 struct ring_buffer_event *event;
1601
Steven Rostedte77405a2009-09-02 14:17:06 -04001602 event = ring_buffer_lock_reserve(buffer, len);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001603 if (event != NULL) {
1604 struct trace_entry *ent = ring_buffer_event_data(event);
1605
1606 tracing_generic_entry_update(ent, flags, pc);
1607 ent->type = type;
1608 }
1609
1610 return event;
1611}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001612
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001613void
1614__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1615{
1616 __this_cpu_write(trace_cmdline_save, true);
1617 ring_buffer_unlock_commit(buffer, event);
1618}
1619
Steven Rostedte77405a2009-09-02 14:17:06 -04001620static inline void
1621__trace_buffer_unlock_commit(struct ring_buffer *buffer,
1622 struct ring_buffer_event *event,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001623 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001624{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001625 __buffer_unlock_commit(buffer, event);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001626
Steven Rostedte77405a2009-09-02 14:17:06 -04001627 ftrace_trace_stack(buffer, flags, 6, pc);
1628 ftrace_trace_userstack(buffer, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001629}
1630
Steven Rostedte77405a2009-09-02 14:17:06 -04001631void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1632 struct ring_buffer_event *event,
1633 unsigned long flags, int pc)
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001634{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001635 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001636}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001637EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001638
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001639static struct ring_buffer *temp_buffer;
1640
Steven Rostedtef5580d2009-02-27 19:38:04 -05001641struct ring_buffer_event *
Steven Rostedtccb469a2012-08-02 10:32:10 -04001642trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1643 struct ftrace_event_file *ftrace_file,
1644 int type, unsigned long len,
1645 unsigned long flags, int pc)
1646{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001647 struct ring_buffer_event *entry;
1648
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001649 *current_rb = ftrace_file->tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001650 entry = trace_buffer_lock_reserve(*current_rb,
Steven Rostedtccb469a2012-08-02 10:32:10 -04001651 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001652 /*
1653 * If tracing is off, but we have triggers enabled
1654 * we still need to look at the event data. Use the temp_buffer
1655 * to store the trace event for the trigger to use. It's recursion
1656 * safe and will not be recorded anywhere.
1657 */
1658 if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
1659 *current_rb = temp_buffer;
1660 entry = trace_buffer_lock_reserve(*current_rb,
1661 type, len, flags, pc);
1662 }
1663 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04001664}
1665EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1666
1667struct ring_buffer_event *
Steven Rostedte77405a2009-09-02 14:17:06 -04001668trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1669 int type, unsigned long len,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001670 unsigned long flags, int pc)
1671{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001672 *current_rb = global_trace.trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04001673 return trace_buffer_lock_reserve(*current_rb,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001674 type, len, flags, pc);
1675}
Steven Rostedt94487d62009-05-05 19:22:53 -04001676EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001677
Steven Rostedte77405a2009-09-02 14:17:06 -04001678void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1679 struct ring_buffer_event *event,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001680 unsigned long flags, int pc)
1681{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001682 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001683}
Steven Rostedt94487d62009-05-05 19:22:53 -04001684EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001685
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001686void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1687 struct ring_buffer_event *event,
1688 unsigned long flags, int pc,
1689 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001690{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001691 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001692
1693 ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1694 ftrace_trace_userstack(buffer, flags, pc);
1695}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001696EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001697
Steven Rostedte77405a2009-09-02 14:17:06 -04001698void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1699 struct ring_buffer_event *event)
Steven Rostedt77d9f462009-04-02 01:16:59 -04001700{
Steven Rostedte77405a2009-09-02 14:17:06 -04001701 ring_buffer_discard_commit(buffer, event);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001702}
Steven Rostedt12acd472009-04-17 16:01:56 -04001703EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001704
Ingo Molnare309b412008-05-12 21:20:51 +02001705void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001706trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04001707 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1708 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001709{
Tom Zanussie1112b42009-03-31 00:48:49 -05001710 struct ftrace_event_call *call = &event_function;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001711 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001712 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001713 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001714
Steven Rostedtd7690412008-10-01 00:29:53 -04001715 /* If we are reading the ring buffer, don't trace */
Rusty Russelldd17c8f2009-10-29 22:34:15 +09001716 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
Steven Rostedtd7690412008-10-01 00:29:53 -04001717 return;
1718
Steven Rostedte77405a2009-09-02 14:17:06 -04001719 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001720 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001721 if (!event)
1722 return;
1723 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04001724 entry->ip = ip;
1725 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05001726
Tom Zanussif306cc82013-10-24 08:34:17 -05001727 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001728 __buffer_unlock_commit(buffer, event);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001729}
1730
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001731#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001732
1733#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1734struct ftrace_stack {
1735 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1736};
1737
1738static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1739static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1740
Steven Rostedte77405a2009-09-02 14:17:06 -04001741static void __ftrace_trace_stack(struct ring_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05001742 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001743 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02001744{
Tom Zanussie1112b42009-03-31 00:48:49 -05001745 struct ftrace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001746 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001747 struct stack_entry *entry;
Ingo Molnar86387f72008-05-12 21:20:51 +02001748 struct stack_trace trace;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001749 int use_stack;
1750 int size = FTRACE_STACK_ENTRIES;
Ingo Molnar86387f72008-05-12 21:20:51 +02001751
1752 trace.nr_entries = 0;
Ingo Molnar86387f72008-05-12 21:20:51 +02001753 trace.skip = skip;
Ingo Molnar86387f72008-05-12 21:20:51 +02001754
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001755 /*
1756 * Since events can happen in NMIs there's no safe way to
1757 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1758 * or NMI comes in, it will just have to use the default
1759 * FTRACE_STACK_SIZE.
1760 */
1761 preempt_disable_notrace();
1762
Shan Wei82146522012-11-19 13:21:01 +08001763 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001764 /*
1765 * We don't need any atomic variables, just a barrier.
1766 * If an interrupt comes in, we don't care, because it would
1767 * have exited and put the counter back to what we want.
1768 * We just need a barrier to keep gcc from moving things
1769 * around.
1770 */
1771 barrier();
1772 if (use_stack == 1) {
Christoph Lameterbdffd892014-04-29 14:17:40 -05001773 trace.entries = this_cpu_ptr(ftrace_stack.calls);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001774 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1775
1776 if (regs)
1777 save_stack_trace_regs(regs, &trace);
1778 else
1779 save_stack_trace(&trace);
1780
1781 if (trace.nr_entries > size)
1782 size = trace.nr_entries;
1783 } else
1784 /* From now on, use_stack is a boolean */
1785 use_stack = 0;
1786
1787 size *= sizeof(unsigned long);
1788
1789 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1790 sizeof(*entry) + size, flags, pc);
1791 if (!event)
1792 goto out;
1793 entry = ring_buffer_event_data(event);
1794
1795 memset(&entry->caller, 0, size);
1796
1797 if (use_stack)
1798 memcpy(&entry->caller, trace.entries,
1799 trace.nr_entries * sizeof(unsigned long));
1800 else {
1801 trace.max_entries = FTRACE_STACK_ENTRIES;
1802 trace.entries = entry->caller;
1803 if (regs)
1804 save_stack_trace_regs(regs, &trace);
1805 else
1806 save_stack_trace(&trace);
1807 }
1808
1809 entry->size = trace.nr_entries;
1810
Tom Zanussif306cc82013-10-24 08:34:17 -05001811 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001812 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001813
1814 out:
1815 /* Again, don't let gcc optimize things here */
1816 barrier();
Shan Wei82146522012-11-19 13:21:01 +08001817 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001818 preempt_enable_notrace();
1819
Ingo Molnarf0a920d2008-05-12 21:20:47 +02001820}
1821
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001822void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1823 int skip, int pc, struct pt_regs *regs)
1824{
1825 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1826 return;
1827
1828 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1829}
1830
Steven Rostedte77405a2009-09-02 14:17:06 -04001831void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1832 int skip, int pc)
Steven Rostedt53614992009-01-15 19:12:40 -05001833{
1834 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1835 return;
1836
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001837 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
Steven Rostedt53614992009-01-15 19:12:40 -05001838}
1839
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001840void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1841 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04001842{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001843 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
Steven Rostedt38697052008-10-01 13:14:09 -04001844}
1845
Steven Rostedt03889382009-12-11 09:48:22 -05001846/**
1847 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001848 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05001849 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001850void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05001851{
1852 unsigned long flags;
1853
1854 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05001855 return;
Steven Rostedt03889382009-12-11 09:48:22 -05001856
1857 local_save_flags(flags);
1858
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001859 /*
1860 * Skip 3 more frames; that seems to land us at the caller
1861 * of this function.
1862 */
1863 skip += 3;
1864 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1865 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05001866}
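
/*
 * Usage sketch (editor's illustration; the condition and counter are
 * hypothetical): drop a kernel backtrace into the trace buffer at a
 * suspicious point.  skip=0 starts at the immediate caller, thanks to
 * the skip += 3 adjustment above.
 */
#if 0	/* illustrative only */
static void example_check(int count)
{
	if (unlikely(count < 0)) {
		trace_printk("count went negative: %d\n", count);
		trace_dump_stack(0);
	}
}
#endif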
1867
Steven Rostedt91e86e52010-11-10 12:56:12 +01001868static DEFINE_PER_CPU(int, user_stack_count);
1869
Steven Rostedte77405a2009-09-02 14:17:06 -04001870void
1871ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02001872{
Tom Zanussie1112b42009-03-31 00:48:49 -05001873 struct ftrace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02001874 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02001875 struct userstack_entry *entry;
1876 struct stack_trace trace;
Török Edwin02b67512008-11-22 13:28:47 +02001877
1878 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1879 return;
1880
Steven Rostedtb6345872010-03-12 20:03:30 -05001881 /*
1882 * NMIs cannot handle page faults, even with fixups.
1883 * Saving the user stack can (and often does) fault.
1884 */
1885 if (unlikely(in_nmi()))
1886 return;
1887
Steven Rostedt91e86e52010-11-10 12:56:12 +01001888 /*
1889 * prevent recursion, since the user stack tracing may
1890 * trigger other kernel events.
1891 */
1892 preempt_disable();
1893 if (__this_cpu_read(user_stack_count))
1894 goto out;
1895
1896 __this_cpu_inc(user_stack_count);
1897
Steven Rostedte77405a2009-09-02 14:17:06 -04001898 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001899 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02001900 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08001901 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02001902 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02001903
Steven Rostedt48659d32009-09-11 11:36:23 -04001904 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02001905 memset(&entry->caller, 0, sizeof(entry->caller));
1906
1907 trace.nr_entries = 0;
1908 trace.max_entries = FTRACE_STACK_ENTRIES;
1909 trace.skip = 0;
1910 trace.entries = entry->caller;
1911
1912 save_stack_trace_user(&trace);
Tom Zanussif306cc82013-10-24 08:34:17 -05001913 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001914 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001915
Li Zefan1dbd1952010-12-09 15:47:56 +08001916 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01001917 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001918 out:
1919 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02001920}
1921
Hannes Eder4fd27352009-02-10 19:44:12 +01001922#ifdef UNUSED
1923static void __trace_userstack(struct trace_array *tr, unsigned long flags)
Török Edwin02b67512008-11-22 13:28:47 +02001924{
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001925 ftrace_trace_userstack(tr, flags, preempt_count());
Török Edwin02b67512008-11-22 13:28:47 +02001926}
Hannes Eder4fd27352009-02-10 19:44:12 +01001927#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02001928
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001929#endif /* CONFIG_STACKTRACE */
1930
Steven Rostedt07d777f2011-09-22 14:01:55 -04001931/* created for use with alloc_percpu */
1932struct trace_buffer_struct {
1933 char buffer[TRACE_BUF_SIZE];
1934};
1935
1936static struct trace_buffer_struct *trace_percpu_buffer;
1937static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1938static struct trace_buffer_struct *trace_percpu_irq_buffer;
1939static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1940
1941/*
1942 * The buffer used is dependent on the context. There is a per cpu
1943 * buffer for normal context, softirq context, hard irq context and
1944 * for NMI context. This allows for lockless recording.
1945 *
1946 * Note, if the buffers failed to be allocated, then this returns NULL
1947 */
1948static char *get_trace_buf(void)
1949{
1950 struct trace_buffer_struct *percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04001951
1952 /*
1953 * If we have allocated per cpu buffers, then we do not
1954 * need to do any locking.
1955 */
1956 if (in_nmi())
1957 percpu_buffer = trace_percpu_nmi_buffer;
1958 else if (in_irq())
1959 percpu_buffer = trace_percpu_irq_buffer;
1960 else if (in_softirq())
1961 percpu_buffer = trace_percpu_sirq_buffer;
1962 else
1963 percpu_buffer = trace_percpu_buffer;
1964
1965 if (!percpu_buffer)
1966 return NULL;
1967
Shan Weid8a03492012-11-13 09:53:04 +08001968 return this_cpu_ptr(&percpu_buffer->buffer[0]);
Steven Rostedt07d777f2011-09-22 14:01:55 -04001969}
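
/*
 * Editor's note (illustrative, not in the original file): the four
 * per-cpu buffers make the selection above re-entrancy safe without
 * locks.  Example interleaving on one cpu:
 *
 *	process context: get_trace_buf() -> trace_percpu_buffer
 *	   ... NMI fires mid-copy ...
 *	   NMI:          get_trace_buf() -> trace_percpu_nmi_buffer
 *
 * Each context writes to its own buffer, so the interrupted copy is
 * never corrupted.
 */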
1970
1971static int alloc_percpu_trace_buffer(void)
1972{
1973 struct trace_buffer_struct *buffers;
1974 struct trace_buffer_struct *sirq_buffers;
1975 struct trace_buffer_struct *irq_buffers;
1976 struct trace_buffer_struct *nmi_buffers;
1977
1978 buffers = alloc_percpu(struct trace_buffer_struct);
1979 if (!buffers)
1980 goto err_warn;
1981
1982 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1983 if (!sirq_buffers)
1984 goto err_sirq;
1985
1986 irq_buffers = alloc_percpu(struct trace_buffer_struct);
1987 if (!irq_buffers)
1988 goto err_irq;
1989
1990 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
1991 if (!nmi_buffers)
1992 goto err_nmi;
1993
1994 trace_percpu_buffer = buffers;
1995 trace_percpu_sirq_buffer = sirq_buffers;
1996 trace_percpu_irq_buffer = irq_buffers;
1997 trace_percpu_nmi_buffer = nmi_buffers;
1998
1999 return 0;
2000
2001 err_nmi:
2002 free_percpu(irq_buffers);
2003 err_irq:
2004 free_percpu(sirq_buffers);
2005 err_sirq:
2006 free_percpu(buffers);
2007 err_warn:
2008 WARN(1, "Could not allocate percpu trace_printk buffer");
2009 return -ENOMEM;
2010}
2011
Steven Rostedt81698832012-10-11 10:15:05 -04002012static int buffers_allocated;
2013
Steven Rostedt07d777f2011-09-22 14:01:55 -04002014void trace_printk_init_buffers(void)
2015{
Steven Rostedt07d777f2011-09-22 14:01:55 -04002016 if (buffers_allocated)
2017 return;
2018
2019 if (alloc_percpu_trace_buffer())
2020 return;
2021
Steven Rostedt2184db42014-05-28 13:14:40 -04002022 /* trace_printk() is for debug use only. Don't use it in production. */
2023
2024 pr_warning("\n**********************************************************\n");
2025 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2026 pr_warning("** **\n");
2027 pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
2028 pr_warning("** **\n");
2029 pr_warning("** This means that this is a DEBUG kernel and it is **\n");
2030 pr_warning("** unsafe for produciton use. **\n");
2031 pr_warning("** **\n");
2032 pr_warning("** If you see this message and you are not debugging **\n");
2033 pr_warning("** the kernel, report this immediately to your vendor! **\n");
2034 pr_warning("** **\n");
2035 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2036 pr_warning("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04002037
Steven Rostedtb382ede62012-10-10 21:44:34 -04002038 /* Expand the buffers to set size */
2039 tracing_update_buffers();
2040
Steven Rostedt07d777f2011-09-22 14:01:55 -04002041 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04002042
2043 /*
2044 * trace_printk_init_buffers() can be called by modules.
2045 * If that happens, then we need to start cmdline recording
2046 * directly here. If the global_trace.buffer is already
2047 * allocated here, then this was called by module code.
2048 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002049 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04002050 tracing_start_cmdline_record();
2051}
2052
2053void trace_printk_start_comm(void)
2054{
2055 /* Start tracing comms if trace printk is set */
2056 if (!buffers_allocated)
2057 return;
2058 tracing_start_cmdline_record();
2059}
2060
2061static void trace_printk_start_stop_comm(int enabled)
2062{
2063 if (!buffers_allocated)
2064 return;
2065
2066 if (enabled)
2067 tracing_start_cmdline_record();
2068 else
2069 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002070}
2071
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002072/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002073 * trace_vbprintk - write a binary message to the tracing buffer
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002074 * @ip: the address of the caller
 * @fmt: the constant printk-style format string
 * @args: the arguments for @fmt, packed in binary form
2075 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04002076int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002077{
Tom Zanussie1112b42009-03-31 00:48:49 -05002078 struct ftrace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002079 struct ring_buffer_event *event;
Steven Rostedte77405a2009-09-02 14:17:06 -04002080 struct ring_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002081 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002082 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002083 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002084 char *tbuffer;
2085 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002086
2087 if (unlikely(tracing_selftest_running || tracing_disabled))
2088 return 0;
2089
2090 /* Don't pollute graph traces with trace_vprintk internals */
2091 pause_graph_tracing();
2092
2093 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04002094 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002095
Steven Rostedt07d777f2011-09-22 14:01:55 -04002096 tbuffer = get_trace_buf();
2097 if (!tbuffer) {
2098 len = 0;
2099 goto out;
2100 }
2101
2102 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2103
2104 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002105 goto out;
2106
Steven Rostedt07d777f2011-09-22 14:01:55 -04002107 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002108 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002109 buffer = tr->trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04002110 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2111 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002112 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002113 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002114 entry = ring_buffer_event_data(event);
2115 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002116 entry->fmt = fmt;
2117
Steven Rostedt07d777f2011-09-22 14:01:55 -04002118 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05002119 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002120 __buffer_unlock_commit(buffer, event);
Steven Rostedtd9313692010-01-06 17:27:11 -05002121 ftrace_trace_stack(buffer, flags, 6, pc);
2122 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002123
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002124out:
Steven Rostedt5168ae52010-06-03 09:36:50 -04002125 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002126 unpause_graph_tracing();
2127
2128 return len;
2129}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002130EXPORT_SYMBOL_GPL(trace_vbprintk);
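
/*
 * Usage sketch (editor's illustration; qid and depth are
 * hypothetical): with a compile-time-constant format string,
 * trace_printk() is routed here, so only the format pointer and the
 * binary-packed arguments (via vbin_printf() above) hit the ring
 * buffer; the string is rendered at read time.
 */
#if 0	/* illustrative only */
static void example_log_queue(int qid, unsigned int depth)
{
	trace_printk("queue %d depth %u\n", qid, depth);
}
#endif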
2131
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002132static int
2133__trace_array_vprintk(struct ring_buffer *buffer,
2134 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002135{
Tom Zanussie1112b42009-03-31 00:48:49 -05002136 struct ftrace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002137 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002138 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002139 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002140 unsigned long flags;
2141 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002142
2143 if (tracing_disabled || tracing_selftest_running)
2144 return 0;
2145
Steven Rostedt07d777f2011-09-22 14:01:55 -04002146 /* Don't pollute graph traces with trace_vprintk internals */
2147 pause_graph_tracing();
2148
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002149 pc = preempt_count();
2150 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002151
Steven Rostedt07d777f2011-09-22 14:01:55 -04002152
2153 tbuffer = get_trace_buf();
2154 if (!tbuffer) {
2155 len = 0;
2156 goto out;
2157 }
2158
2159 len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2160 if (len > TRACE_BUF_SIZE)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002161 goto out;
2162
Steven Rostedt07d777f2011-09-22 14:01:55 -04002163 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002164 size = sizeof(*entry) + len + 1;
Steven Rostedte77405a2009-09-02 14:17:06 -04002165 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
Steven Rostedt07d777f2011-09-22 14:01:55 -04002166 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002167 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002168 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002169 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002170 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002171
Steven Rostedt07d777f2011-09-22 14:01:55 -04002172 memcpy(&entry->buf, tbuffer, len);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002173 entry->buf[len] = '\0';
Tom Zanussif306cc82013-10-24 08:34:17 -05002174 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002175 __buffer_unlock_commit(buffer, event);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002176 ftrace_trace_stack(buffer, flags, 6, pc);
Steven Rostedtd9313692010-01-06 17:27:11 -05002177 }
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002178 out:
2179 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002180 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002181
2182 return len;
2183}
Steven Rostedt659372d2009-09-03 19:11:07 -04002184
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002185int trace_array_vprintk(struct trace_array *tr,
2186 unsigned long ip, const char *fmt, va_list args)
2187{
2188 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2189}
2190
2191int trace_array_printk(struct trace_array *tr,
2192 unsigned long ip, const char *fmt, ...)
2193{
2194 int ret;
2195 va_list ap;
2196
2197 if (!(trace_flags & TRACE_ITER_PRINTK))
2198 return 0;
2199
2200 va_start(ap, fmt);
2201 ret = trace_array_vprintk(tr, ip, fmt, ap);
2202 va_end(ap);
2203 return ret;
2204}
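
/*
 * Usage sketch (editor's illustration; tr is assumed to be a
 * trace_array instance obtained elsewhere): write into that
 * instance's buffer rather than the global one.  The
 * TRACE_ITER_PRINTK check above makes this a no-op unless the
 * trace_printk option is enabled.
 */
#if 0	/* illustrative only */
static void example_instance_log(struct trace_array *tr, int id)
{
	trace_array_printk(tr, _THIS_IP_, "widget %d reset\n", id);
}
#endif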
2205
2206int trace_array_printk_buf(struct ring_buffer *buffer,
2207 unsigned long ip, const char *fmt, ...)
2208{
2209 int ret;
2210 va_list ap;
2211
2212 if (!(trace_flags & TRACE_ITER_PRINTK))
2213 return 0;
2214
2215 va_start(ap, fmt);
2216 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2217 va_end(ap);
2218 return ret;
2219}
2220
Steven Rostedt659372d2009-09-03 19:11:07 -04002221int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2222{
Steven Rostedta813a152009-10-09 01:41:35 -04002223 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04002224}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002225EXPORT_SYMBOL_GPL(trace_vprintk);
2226
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002227static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04002228{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002229 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2230
Steven Rostedt5a90f572008-09-03 17:42:51 -04002231 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002232 if (buf_iter)
2233 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04002234}
2235
Ingo Molnare309b412008-05-12 21:20:51 +02002236static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002237peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2238 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002239{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002240 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002241 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002242
Steven Rostedtd7690412008-10-01 00:29:53 -04002243 if (buf_iter)
2244 event = ring_buffer_iter_peek(buf_iter, ts);
2245 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002246 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002247 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04002248
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002249 if (event) {
2250 iter->ent_size = ring_buffer_event_length(event);
2251 return ring_buffer_event_data(event);
2252 }
2253 iter->ent_size = 0;
2254 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002255}
Steven Rostedtd7690412008-10-01 00:29:53 -04002256
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002257static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002258__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2259 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002260{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002261 struct ring_buffer *buffer = iter->trace_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002262 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08002263 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002264 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002265 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002266 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002267 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002268 int cpu;
2269
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002270 /*
2271 * If we are in a per_cpu trace file, don't bother iterating over
2272 * all cpus; just peek directly at this cpu's buffer.
2273 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002274 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002275 if (ring_buffer_empty_cpu(buffer, cpu_file))
2276 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002277 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002278 if (ent_cpu)
2279 *ent_cpu = cpu_file;
2280
2281 return ent;
2282 }
2283
Steven Rostedtab464282008-05-12 21:21:00 +02002284 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002285
2286 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002287 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002288
Steven Rostedtbc21b472010-03-31 19:49:26 -04002289 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002290
Ingo Molnarcdd31cd2008-05-12 21:20:46 +02002291 /*
2292 * Pick the entry with the smallest timestamp:
2293 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002294 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002295 next = ent;
2296 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002297 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002298 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002299 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002300 }
2301 }
2302
Steven Rostedt12b5da32012-03-27 10:43:28 -04002303 iter->ent_size = next_size;
2304
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002305 if (ent_cpu)
2306 *ent_cpu = next_cpu;
2307
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002308 if (ent_ts)
2309 *ent_ts = next_ts;
2310
Steven Rostedtbc21b472010-03-31 19:49:26 -04002311 if (missing_events)
2312 *missing_events = next_lost;
2313
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002314 return next;
2315}
2316
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002317/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002318struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2319 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002320{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002321 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002322}
Ingo Molnar8c523a92008-05-12 21:20:46 +02002323
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002324/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05002325void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002326{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002327 iter->ent = __find_next_entry(iter, &iter->cpu,
2328 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02002329
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002330 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002331 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002332
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002333 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02002334}
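
/*
 * Editor's note (illustrative, not in the original file): readers
 * drive the timestamp merge in __find_next_entry() with a simple
 * loop; each call returns the globally-oldest entry across the
 * per-cpu buffers and advances past it, e.g.:
 *
 *	while (trace_find_next_entry_inc(iter))
 *		print_trace_line(iter);
 */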
2335
Ingo Molnare309b412008-05-12 21:20:51 +02002336static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002337{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002338 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002339 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002340}
2341
Ingo Molnare309b412008-05-12 21:20:51 +02002342static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002343{
2344 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002345 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002346 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002347
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002348 WARN_ON_ONCE(iter->leftover);
2349
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002350 (*pos)++;
2351
2352 /* can't go backwards */
2353 if (iter->idx > i)
2354 return NULL;
2355
2356 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05002357 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002358 else
2359 ent = iter;
2360
2361 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05002362 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002363
2364 iter->pos = *pos;
2365
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002366 return ent;
2367}
2368
Jason Wessel955b61e2010-08-05 09:22:23 -05002369void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002370{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002371 struct ring_buffer_event *event;
2372 struct ring_buffer_iter *buf_iter;
2373 unsigned long entries = 0;
2374 u64 ts;
2375
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002376 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002377
Steven Rostedt6d158a82012-06-27 20:46:14 -04002378 buf_iter = trace_buffer_iter(iter, cpu);
2379 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002380 return;
2381
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002382 ring_buffer_iter_reset(buf_iter);
2383
2384 /*
2385 * With the max latency tracers, a reset may never have taken
2386 * place on a cpu. This is evident when a timestamp is before
2387 * the start of the buffer.
2388 */
2389 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002390 if (ts >= iter->trace_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002391 break;
2392 entries++;
2393 ring_buffer_read(buf_iter, NULL);
2394 }
2395
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002396 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002397}
2398
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002399/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002400 * The current tracer is copied to avoid taking a global lock
2401 * all around.
2402 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002403static void *s_start(struct seq_file *m, loff_t *pos)
2404{
2405 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002406 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002407 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002408 void *p = NULL;
2409 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002410 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002411
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09002412 /*
2413 * Copy the tracer to avoid using a global lock all around.
2414 * iter->trace is a copy of current_trace, so the name pointer
2415 * may be compared instead of calling strcmp(), as iter->trace->name
2416 * will point to the same string as current_trace->name.
2417 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002418 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002419 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2420 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002421 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002422
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002423#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002424 if (iter->snapshot && iter->trace->use_max_tr)
2425 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002426#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002427
2428 if (!iter->snapshot)
2429 atomic_inc(&trace_record_cmdline_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002430
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002431 if (*pos != iter->pos) {
2432 iter->ent = NULL;
2433 iter->cpu = 0;
2434 iter->idx = -1;
2435
Steven Rostedtae3b5092013-01-23 15:22:59 -05002436 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002437 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002438 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002439 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002440 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002441
Lai Jiangshanac91d852010-03-02 17:54:50 +08002442 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002443 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2444 ;
2445
2446 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002447 /*
2448 * If we overflowed the seq_file before, then we want
2449 * to just reuse the trace_seq buffer again.
2450 */
2451 if (iter->leftover)
2452 p = iter;
2453 else {
2454 l = *pos - 1;
2455 p = s_next(m, p, &l);
2456 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002457 }
2458
Lai Jiangshan4f535962009-05-18 19:35:34 +08002459 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002460 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002461 return p;
2462}
2463
2464static void s_stop(struct seq_file *m, void *p)
2465{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002466 struct trace_iterator *iter = m->private;
2467
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002468#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002469 if (iter->snapshot && iter->trace->use_max_tr)
2470 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002471#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002472
2473 if (!iter->snapshot)
2474 atomic_dec(&trace_record_cmdline_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002475
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002476 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08002477 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002478}
2479
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002480static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002481get_total_entries(struct trace_buffer *buf,
2482 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002483{
2484 unsigned long count;
2485 int cpu;
2486
2487 *total = 0;
2488 *entries = 0;
2489
2490 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002491 count = ring_buffer_entries_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002492 /*
2493 * If this buffer has skipped entries, then we hold all
2494 * entries for the trace and we need to ignore the
2495 * ones before the time stamp.
2496 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002497 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2498 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002499 /* total is the same as the entries */
2500 *total += count;
2501 } else
2502 *total += count +
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002503 ring_buffer_overrun_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002504 *entries += count;
2505 }
2506}
2507
Ingo Molnare309b412008-05-12 21:20:51 +02002508static void print_lat_help_header(struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002509{
Michael Ellermana6168352008-08-20 16:36:11 -07002510	seq_puts(m, "#                  _------=> CPU#            \n");
2511	seq_puts(m, "#                 / _-----=> irqs-off        \n");
2512	seq_puts(m, "#                | / _----=> need-resched    \n");
2513	seq_puts(m, "#                || / _---=> hardirq/softirq \n");
2514	seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
Steven Rostedte6e1e252011-03-09 10:41:56 -05002515	seq_puts(m, "#                |||| /     delay             \n");
2516	seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
2517	seq_puts(m, "#     \\   /      |||||  \\    |   /           \n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002518}
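/*
 * An illustrative line in the latency format above (the values are
 * hypothetical, not taken from a real trace):
 *
 *	  <idle>-0       3d..1    5us : default_idle
 *
 * "<idle>" is the cmd and 0 the pid; "3d..1" packs the CPU number,
 * irqs-off ('d'), need-resched ('.'), hardirq/softirq ('.') and
 * preempt-depth (1) columns; the time and caller columns follow.
 */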
2519
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002520static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002521{
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002522 unsigned long total;
2523 unsigned long entries;
2524
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002525 get_total_entries(buf, &total, &entries);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002526 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2527 entries, total, num_online_cpus());
2528 seq_puts(m, "#\n");
2529}
2530
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002531static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002532{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002533 print_event_info(buf, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002534	seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n");
Michael Ellermana6168352008-08-20 16:36:11 -07002535	seq_puts(m, "#              | |       |          |         |\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002536}
2537
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002538static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt77271ce2011-11-17 09:34:33 -05002539{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002540 print_event_info(buf, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002541	seq_puts(m, "#                              _-----=> irqs-off\n");
2542	seq_puts(m, "#                             / _----=> need-resched\n");
2543	seq_puts(m, "#                            | / _---=> hardirq/softirq\n");
2544	seq_puts(m, "#                            || / _--=> preempt-depth\n");
2545	seq_puts(m, "#                            ||| /     delay\n");
2546	seq_puts(m, "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
2547	seq_puts(m, "#              | |       |   ||||       |         |\n");
2548}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002549
Jiri Olsa62b915f2010-04-02 19:01:22 +02002550void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002551print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2552{
2553 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002554 struct trace_buffer *buf = iter->trace_buffer;
2555 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002556 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002557 unsigned long entries;
2558 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002559	const char *name = type->name;
2560
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002563 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002564
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002565 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002566 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002567 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002568 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002569 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002570 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02002571 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002572 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02002573 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002574 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002575#if defined(CONFIG_PREEMPT_NONE)
2576 "server",
2577#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2578 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04002579#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002580 "preempt",
2581#else
2582 "unknown",
2583#endif
2584 /* These are reserved for later use */
2585 0, 0, 0, 0);
2586#ifdef CONFIG_SMP
2587 seq_printf(m, " #P:%d)\n", num_online_cpus());
2588#else
2589 seq_puts(m, ")\n");
2590#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002591 seq_puts(m, "# -----------------\n");
2592 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002593 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07002594 data->comm, data->pid,
2595 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002596 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002597 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002598
2599 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002600 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002601 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2602 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002603 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002604 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2605 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04002606 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002607 }
2608
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002609 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002610}
2611
Steven Rostedta3097202008-11-07 22:36:02 -05002612static void test_cpu_buff_start(struct trace_iterator *iter)
2613{
2614 struct trace_seq *s = &iter->seq;
2615
Steven Rostedt12ef7d42008-11-12 17:52:38 -05002616 if (!(trace_flags & TRACE_ITER_ANNOTATE))
2617 return;
2618
2619 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2620 return;
2621
Rusty Russell44623442009-01-01 10:12:23 +10302622 if (cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05002623 return;
2624
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002625 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002626 return;
2627
Rusty Russell44623442009-01-01 10:12:23 +10302628 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02002629
2630 /* Don't print started cpu buffer for the first entry of the trace */
2631 if (iter->idx > 1)
2632		trace_seq_printf(s, "##### CPU %u buffer started #####\n",
2633 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05002634}
2635
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002636static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002637{
Steven Rostedt214023c2008-05-12 21:20:46 +02002638 struct trace_seq *s = &iter->seq;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002639 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002640 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002641 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002642
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002643 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002644
Steven Rostedta3097202008-11-07 22:36:02 -05002645 test_cpu_buff_start(iter);
2646
Steven Rostedtf633cef2008-12-23 23:24:13 -05002647 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002648
2649 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt27d48be2009-03-04 21:57:29 -05002650 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2651 if (!trace_print_lat_context(iter))
2652 goto partial;
2653 } else {
2654 if (!trace_print_context(iter))
2655 goto partial;
2656 }
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002657 }
2658
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002659 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04002660 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002661
2662 if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
2663 goto partial;
Steven Rostedt7104f302008-10-01 10:52:51 -04002664
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002665 return TRACE_TYPE_HANDLED;
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002666partial:
2667 return TRACE_TYPE_PARTIAL_LINE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002668}
2669
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002670static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002671{
2672 struct trace_seq *s = &iter->seq;
2673 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002674 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002675
2676 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002677
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002678 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002679 if (!trace_seq_printf(s, "%d %d %llu ",
2680 entry->pid, iter->cpu, iter->ts))
2681 goto partial;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002682 }
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002683
Steven Rostedtf633cef2008-12-23 23:24:13 -05002684 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002685 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04002686 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002687
2688 if (!trace_seq_printf(s, "%d ?\n", entry->type))
2689 goto partial;
Steven Rostedt7104f302008-10-01 10:52:51 -04002690
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002691 return TRACE_TYPE_HANDLED;
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002692partial:
2693 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002694}
2695
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002696static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002697{
2698 struct trace_seq *s = &iter->seq;
2699 unsigned char newline = '\n';
2700 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002701 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002702
2703 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002704
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002705 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2706 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
2707 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
2708 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
2709 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002710
Steven Rostedtf633cef2008-12-23 23:24:13 -05002711 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002712 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04002713 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002714 if (ret != TRACE_TYPE_HANDLED)
2715 return ret;
2716 }
Steven Rostedt7104f302008-10-01 10:52:51 -04002717
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002718 SEQ_PUT_FIELD_RET(s, newline);
2719
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002720 return TRACE_TYPE_HANDLED;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002721}
2722
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002723static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002724{
2725 struct trace_seq *s = &iter->seq;
2726 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002727 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002728
2729 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002730
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002731 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2732 SEQ_PUT_FIELD_RET(s, entry->pid);
Steven Rostedt1830b52d2009-02-07 19:38:43 -05002733 SEQ_PUT_FIELD_RET(s, iter->cpu);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002734 SEQ_PUT_FIELD_RET(s, iter->ts);
2735 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002736
Steven Rostedtf633cef2008-12-23 23:24:13 -05002737 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04002738 return event ? event->funcs->binary(iter, 0, event) :
2739 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002740}
2741
Jiri Olsa62b915f2010-04-02 19:01:22 +02002742int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002743{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002744 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002745 int cpu;
2746
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002747 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002748 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002749 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002750 buf_iter = trace_buffer_iter(iter, cpu);
2751 if (buf_iter) {
2752 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002753 return 0;
2754 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002755 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002756 return 0;
2757 }
2758 return 1;
2759 }
2760
Steven Rostedtab464282008-05-12 21:21:00 +02002761 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04002762 buf_iter = trace_buffer_iter(iter, cpu);
2763 if (buf_iter) {
2764 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04002765 return 0;
2766 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002767 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04002768 return 0;
2769 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002770 }
Steven Rostedtd7690412008-10-01 00:29:53 -04002771
Frederic Weisbecker797d3712008-09-30 18:13:45 +02002772 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002773}
2774
Lai Jiangshan4f535962009-05-18 19:35:34 +08002775/* Called with trace_event_read_lock() held. */
Jason Wessel955b61e2010-08-05 09:22:23 -05002776enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002777{
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002778 enum print_line_t ret;
2779
Jiri Olsaee5e51f2011-03-25 12:05:18 +01002780 if (iter->lost_events &&
2781 !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2782 iter->cpu, iter->lost_events))
2783 return TRACE_TYPE_PARTIAL_LINE;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002784
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002785 if (iter->trace && iter->trace->print_line) {
2786 ret = iter->trace->print_line(iter);
2787 if (ret != TRACE_TYPE_UNHANDLED)
2788 return ret;
2789 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02002790
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05002791 if (iter->ent->type == TRACE_BPUTS &&
2792 trace_flags & TRACE_ITER_PRINTK &&
2793 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2794 return trace_print_bputs_msg_only(iter);
2795
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002796 if (iter->ent->type == TRACE_BPRINT &&
2797 trace_flags & TRACE_ITER_PRINTK &&
2798 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04002799 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002800
Frederic Weisbecker66896a82008-12-13 20:18:13 +01002801 if (iter->ent->type == TRACE_PRINT &&
2802 trace_flags & TRACE_ITER_PRINTK &&
2803 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04002804 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01002805
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002806 if (trace_flags & TRACE_ITER_BIN)
2807 return print_bin_fmt(iter);
2808
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002809 if (trace_flags & TRACE_ITER_HEX)
2810 return print_hex_fmt(iter);
2811
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002812 if (trace_flags & TRACE_ITER_RAW)
2813 return print_raw_fmt(iter);
2814
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002815 return print_trace_fmt(iter);
2816}
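/*
 * A minimal sketch (not part of this file) of the tracer hook consulted
 * above: a tracer's print_line() callback handles only its own entries
 * and returns TRACE_TYPE_UNHANDLED for everything else, so the generic
 * formatters take over.  TRACE_EXAMPLE is a hypothetical entry type.
 */
#if 0
static enum print_line_t example_print_line(struct trace_iterator *iter)
{
	if (iter->ent->type != TRACE_EXAMPLE)	/* not ours: fall through */
		return TRACE_TYPE_UNHANDLED;

	if (!trace_seq_puts(&iter->seq, "example event\n"))
		return TRACE_TYPE_PARTIAL_LINE;	/* seq buffer overflowed */

	return TRACE_TYPE_HANDLED;
}
#endif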
2817
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01002818void trace_latency_header(struct seq_file *m)
2819{
2820 struct trace_iterator *iter = m->private;
2821
2822 /* print nothing if the buffers are empty */
2823 if (trace_empty(iter))
2824 return;
2825
2826 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2827 print_trace_header(m, iter);
2828
2829 if (!(trace_flags & TRACE_ITER_VERBOSE))
2830 print_lat_help_header(m);
2831}
2832
Jiri Olsa62b915f2010-04-02 19:01:22 +02002833void trace_default_header(struct seq_file *m)
2834{
2835 struct trace_iterator *iter = m->private;
2836
Jiri Olsaf56e7f82011-06-03 16:58:49 +02002837 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2838 return;
2839
Jiri Olsa62b915f2010-04-02 19:01:22 +02002840 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2841 /* print nothing if the buffers are empty */
2842 if (trace_empty(iter))
2843 return;
2844 print_trace_header(m, iter);
2845 if (!(trace_flags & TRACE_ITER_VERBOSE))
2846 print_lat_help_header(m);
2847 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05002848 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2849 if (trace_flags & TRACE_ITER_IRQ_INFO)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002850 print_func_help_header_irq(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002851 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002852 print_func_help_header(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002853 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02002854 }
2855}
2856
Steven Rostedte0a413f2011-09-29 21:26:16 -04002857static void test_ftrace_alive(struct seq_file *m)
2858{
2859 if (!ftrace_is_dead())
2860 return;
2861	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
2862	seq_puts(m, "# MAY BE MISSING FUNCTION EVENTS\n");
2863}
2864
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002865#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002866static void show_snapshot_main_help(struct seq_file *m)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002867{
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002868	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
2869	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2870	seq_puts(m, "#                      Takes a snapshot of the main buffer.\n");
Wang YanQingb9be6d02013-09-14 12:59:16 +08002871	seq_puts(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n");
2872	seq_puts(m, "#                      (Doesn't have to be '2'; works with any number that\n");
2873	seq_puts(m, "#                       is not a '0' or '1')\n");
2874}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002875
2876static void show_snapshot_percpu_help(struct seq_file *m)
2877{
2878	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2879#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2880	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2881	seq_puts(m, "#                      Takes a snapshot of the main buffer for this cpu.\n");
2882#else
2883	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
2884	seq_puts(m, "#                     Must use main snapshot file to allocate.\n");
2885#endif
2886	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
2887	seq_puts(m, "#                      (Doesn't have to be '2'; works with any number that\n");
2888	seq_puts(m, "#                       is not a '0' or '1')\n");
2889}
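/*
 * Taken together, the two help texts above describe a workflow along
 * these lines (paths relative to the debugfs/tracefs tracing directory):
 *
 *	echo 1 > snapshot	# allocate (if needed) and take a snapshot
 *	cat snapshot		# read the frozen copy
 *	echo 0 > snapshot	# clear and free the snapshot buffer
 */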
2890
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002891static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2892{
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05002893	if (iter->tr->allocated_snapshot)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002894		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
2895	else
2896		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
2897
2898	seq_puts(m, "# Snapshot commands:\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002899 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2900 show_snapshot_main_help(m);
2901 else
2902 show_snapshot_percpu_help(m);
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002903}
2904#else
2905/* Should never be called */
2906static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2907#endif
2908
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002909static int s_show(struct seq_file *m, void *v)
2910{
2911 struct trace_iterator *iter = v;
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002912 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002913
2914 if (iter->ent == NULL) {
2915 if (iter->tr) {
2916 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2917 seq_puts(m, "#\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04002918 test_ftrace_alive(m);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002919 }
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002920 if (iter->snapshot && trace_empty(iter))
2921 print_snapshot_help(m, iter);
2922 else if (iter->trace && iter->trace->print_header)
Markus Metzger8bba1bf2008-11-25 09:12:31 +01002923 iter->trace->print_header(m);
Jiri Olsa62b915f2010-04-02 19:01:22 +02002924 else
2925 trace_default_header(m);
2926
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002927 } else if (iter->leftover) {
2928 /*
2929 * If we filled the seq_file buffer earlier, we
2930 * want to just show it now.
2931 */
2932 ret = trace_print_seq(m, &iter->seq);
2933
2934 /* ret should this time be zero, but you never know */
2935 iter->leftover = ret;
2936
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002937 } else {
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002938 print_trace_line(iter);
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002939 ret = trace_print_seq(m, &iter->seq);
2940 /*
2941 * If we overflow the seq_file buffer, then it will
2942 * ask us for this data again at start up.
2943 * Use that instead.
2944 * ret is 0 if seq_file write succeeded.
2945 * -1 otherwise.
2946 */
2947 iter->leftover = ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002948 }
2949
2950 return 0;
2951}
2952
Oleg Nesterov649e9c72013-07-23 17:25:54 +02002953/*
2954 * Should be used after trace_array_get(); trace_types_lock
2955 * ensures that i_cdev was already initialized.
2956 */
2957static inline int tracing_get_cpu(struct inode *inode)
2958{
2959 if (inode->i_cdev) /* See trace_create_cpu_file() */
2960 return (long)inode->i_cdev - 1;
2961 return RING_BUFFER_ALL_CPUS;
2962}
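/*
 * The decoding above relies on the file-creation side having stored
 * cpu + 1 in i_cdev, so that a NULL i_cdev still means "all CPUs".
 * A sketch of that idea (hypothetical helper; the real counterpart
 * is trace_create_cpu_file()):
 */
#if 0
static void example_stash_cpu(struct inode *inode, long cpu)
{
	/* store cpu + 1: keeps NULL reserved for RING_BUFFER_ALL_CPUS */
	inode->i_cdev = (void *)(cpu + 1);
}
#endif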
2963
James Morris88e9d342009-09-22 16:43:43 -07002964static const struct seq_operations tracer_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02002965 .start = s_start,
2966 .next = s_next,
2967 .stop = s_stop,
2968 .show = s_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002969};
2970
Ingo Molnare309b412008-05-12 21:20:51 +02002971static struct trace_iterator *
Oleg Nesterov6484c712013-07-23 17:26:10 +02002972__tracing_open(struct inode *inode, struct file *file, bool snapshot)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002973{
Oleg Nesterov6484c712013-07-23 17:26:10 +02002974 struct trace_array *tr = inode->i_private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002975 struct trace_iterator *iter;
Jiri Olsa50e18b92012-04-25 10:23:39 +02002976 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002977
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002978 if (tracing_disabled)
2979 return ERR_PTR(-ENODEV);
Steven Rostedt60a11772008-05-12 21:20:44 +02002980
Jiri Olsa50e18b92012-04-25 10:23:39 +02002981 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002982 if (!iter)
2983 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002984
Steven Rostedt6d158a82012-06-27 20:46:14 -04002985 iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
2986 GFP_KERNEL);
Dan Carpenter93574fc2012-07-11 09:35:08 +03002987 if (!iter->buffer_iter)
2988 goto release;
2989
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002990 /*
2991 * We make a copy of the current tracer to avoid concurrent
2992 * changes on it while we are reading.
2993 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002994 mutex_lock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002995 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002996 if (!iter->trace)
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002997 goto fail;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002998
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002999 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003000
Li Zefan79f55992009-06-15 14:58:26 +08003001 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003002 goto fail;
3003
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003004 iter->tr = tr;
3005
3006#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003007 /* Currently only the top directory has a snapshot */
3008 if (tr->current_trace->print_max || snapshot)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003009 iter->trace_buffer = &tr->max_buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003010 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003011#endif
3012 iter->trace_buffer = &tr->trace_buffer;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003013 iter->snapshot = snapshot;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003014 iter->pos = -1;
Oleg Nesterov6484c712013-07-23 17:26:10 +02003015 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003016 mutex_init(&iter->mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003017
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003018	/* Notify the tracer early, before we stop tracing. */
3019 if (iter->trace && iter->trace->open)
Markus Metzgera93751c2008-12-11 13:53:26 +01003020 iter->trace->open(iter);
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003021
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003022 /* Annotate start of buffers if we had overruns */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003023 if (ring_buffer_overruns(iter->trace_buffer->buffer))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003024 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3025
David Sharp8be07092012-11-13 12:18:22 -08003026 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09003027 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08003028 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3029
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003030 /* stop the trace while dumping if we are not opening "snapshot" */
3031 if (!iter->snapshot)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003032 tracing_stop_tr(tr);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003033
Steven Rostedtae3b5092013-01-23 15:22:59 -05003034 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003035 for_each_tracing_cpu(cpu) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003036 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003037 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07003038 }
3039 ring_buffer_read_prepare_sync();
3040 for_each_tracing_cpu(cpu) {
3041 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003042 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003043 }
3044 } else {
3045 cpu = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003046 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003047 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07003048 ring_buffer_read_prepare_sync();
3049 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003050 tracing_iter_reset(iter, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003051 }
3052
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003053 mutex_unlock(&trace_types_lock);
3054
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003055 return iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003056
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003057 fail:
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003058 mutex_unlock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003059 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003060 kfree(iter->buffer_iter);
Dan Carpenter93574fc2012-07-11 09:35:08 +03003061release:
Jiri Olsa50e18b92012-04-25 10:23:39 +02003062 seq_release_private(inode, file);
3063 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003064}
3065
3066int tracing_open_generic(struct inode *inode, struct file *filp)
3067{
Steven Rostedt60a11772008-05-12 21:20:44 +02003068 if (tracing_disabled)
3069 return -ENODEV;
3070
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003071 filp->private_data = inode->i_private;
3072 return 0;
3073}
3074
Geyslan G. Bem2e864212013-10-18 21:15:54 -03003075bool tracing_is_disabled(void)
3076{
3077	return tracing_disabled;
3078}
3079
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003080/*
3081 * Open and update trace_array ref count.
3082 * Must have the current trace_array passed to it.
3083 */
Steven Rostedt (Red Hat)dcc30222013-07-02 20:30:52 -04003084static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003085{
3086 struct trace_array *tr = inode->i_private;
3087
3088 if (tracing_disabled)
3089 return -ENODEV;
3090
3091 if (trace_array_get(tr) < 0)
3092 return -ENODEV;
3093
3094 filp->private_data = inode->i_private;
3095
3096 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003097}
3098
Hannes Eder4fd27352009-02-10 19:44:12 +01003099static int tracing_release(struct inode *inode, struct file *file)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003100{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003101 struct trace_array *tr = inode->i_private;
matt mooney907f2782010-09-27 19:04:53 -07003102 struct seq_file *m = file->private_data;
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003103 struct trace_iterator *iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003104 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003105
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003106 if (!(file->f_mode & FMODE_READ)) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003107 trace_array_put(tr);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003108 return 0;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003109 }
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003110
Oleg Nesterov6484c712013-07-23 17:26:10 +02003111 /* Writes do not use seq_file */
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003112 iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003113 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05003114
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003115 for_each_tracing_cpu(cpu) {
3116 if (iter->buffer_iter[cpu])
3117 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3118 }
3119
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003120 if (iter->trace && iter->trace->close)
3121 iter->trace->close(iter);
3122
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003123 if (!iter->snapshot)
3124 /* reenable tracing if it was previously enabled */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003125 tracing_start_tr(tr);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003126
3127 __trace_array_put(tr);
3128
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003129 mutex_unlock(&trace_types_lock);
3130
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003131 mutex_destroy(&iter->mutex);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003132 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003133 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003134 kfree(iter->buffer_iter);
Jiri Olsa50e18b92012-04-25 10:23:39 +02003135 seq_release_private(inode, file);
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003136
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003137 return 0;
3138}
3139
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003140static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3141{
3142 struct trace_array *tr = inode->i_private;
3143
3144 trace_array_put(tr);
3145 return 0;
3146}
3147
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003148static int tracing_single_release_tr(struct inode *inode, struct file *file)
3149{
3150 struct trace_array *tr = inode->i_private;
3151
3152 trace_array_put(tr);
3153
3154 return single_release(inode, file);
3155}
3156
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003157static int tracing_open(struct inode *inode, struct file *file)
3158{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003159 struct trace_array *tr = inode->i_private;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003160 struct trace_iterator *iter;
3161 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003162
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003163 if (trace_array_get(tr) < 0)
3164 return -ENODEV;
3165
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003166 /* If this file was open for write, then erase contents */
Oleg Nesterov6484c712013-07-23 17:26:10 +02003167 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3168 int cpu = tracing_get_cpu(inode);
3169
3170 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003171 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003172 else
Oleg Nesterov6484c712013-07-23 17:26:10 +02003173 tracing_reset(&tr->trace_buffer, cpu);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003174 }
3175
3176 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003177 iter = __tracing_open(inode, file, false);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003178 if (IS_ERR(iter))
3179 ret = PTR_ERR(iter);
3180 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
3181 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3182 }
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003183
3184 if (ret < 0)
3185 trace_array_put(tr);
3186
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003187 return ret;
3188}
3189
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003190/*
3191 * Some tracers are not suitable for instance buffers.
3192 * A tracer is always available for the global array (toplevel)
3193 * or if it explicitly states that it is.
3194 */
3195static bool
3196trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3197{
3198 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3199}
3200
3201/* Find the next tracer that this trace array may use */
3202static struct tracer *
3203get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3204{
3205 while (t && !trace_ok_for_array(t, tr))
3206 t = t->next;
3207
3208 return t;
3209}
3210
Ingo Molnare309b412008-05-12 21:20:51 +02003211static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003212t_next(struct seq_file *m, void *v, loff_t *pos)
3213{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003214 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003215 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003216
3217 (*pos)++;
3218
3219 if (t)
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003220 t = get_tracer_for_array(tr, t->next);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003221
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003222 return t;
3223}
3224
3225static void *t_start(struct seq_file *m, loff_t *pos)
3226{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003227 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003228 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003229 loff_t l = 0;
3230
3231 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003232
3233 t = get_tracer_for_array(tr, trace_types);
3234 for (; t && l < *pos; t = t_next(m, t, &l))
3235 ;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003236
3237 return t;
3238}
3239
3240static void t_stop(struct seq_file *m, void *p)
3241{
3242 mutex_unlock(&trace_types_lock);
3243}
3244
3245static int t_show(struct seq_file *m, void *v)
3246{
3247 struct tracer *t = v;
3248
3249 if (!t)
3250 return 0;
3251
3252	seq_puts(m, t->name);
3253 if (t->next)
3254 seq_putc(m, ' ');
3255 else
3256 seq_putc(m, '\n');
3257
3258 return 0;
3259}
3260
James Morris88e9d342009-09-22 16:43:43 -07003261static const struct seq_operations show_traces_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003262 .start = t_start,
3263 .next = t_next,
3264 .stop = t_stop,
3265 .show = t_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003266};
3267
3268static int show_traces_open(struct inode *inode, struct file *file)
3269{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003270 struct trace_array *tr = inode->i_private;
3271 struct seq_file *m;
3272 int ret;
3273
Steven Rostedt60a11772008-05-12 21:20:44 +02003274 if (tracing_disabled)
3275 return -ENODEV;
3276
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003277 ret = seq_open(file, &show_traces_seq_ops);
3278 if (ret)
3279 return ret;
3280
3281 m = file->private_data;
3282 m->private = tr;
3283
3284 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003285}
3286
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003287static ssize_t
3288tracing_write_stub(struct file *filp, const char __user *ubuf,
3289 size_t count, loff_t *ppos)
3290{
3291 return count;
3292}
3293
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003294loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08003295{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003296 int ret;
3297
Slava Pestov364829b2010-11-24 15:13:16 -08003298 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003299 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08003300 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003301 file->f_pos = ret = 0;
3302
3303 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08003304}
3305
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003306static const struct file_operations tracing_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003307 .open = tracing_open,
3308 .read = seq_read,
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003309 .write = tracing_write_stub,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003310 .llseek = tracing_lseek,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003311 .release = tracing_release,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003312};
3313
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003314static const struct file_operations show_traces_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003315 .open = show_traces_open,
3316 .read = seq_read,
3317 .release = seq_release,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003318 .llseek = seq_lseek,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003319};
3320
Ingo Molnar36dfe922008-05-12 21:20:52 +02003321/*
Ingo Molnar36dfe922008-05-12 21:20:52 +02003322 * The tracer itself will not take this lock, but we still want
3323 * to provide a consistent cpumask to user-space:
3324 */
3325static DEFINE_MUTEX(tracing_cpumask_update_lock);
3326
3327/*
3328 * Temporary storage for the character representation of the
3329 * CPU bitmask (and one more byte for the newline):
3330 */
3331static char mask_str[NR_CPUS + 1];
3332
Ingo Molnarc7078de2008-05-12 21:20:52 +02003333static ssize_t
3334tracing_cpumask_read(struct file *filp, char __user *ubuf,
3335 size_t count, loff_t *ppos)
3336{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003337 struct trace_array *tr = file_inode(filp)->i_private;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003338 int len;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003339
3340 mutex_lock(&tracing_cpumask_update_lock);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003341
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003342 len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003343 if (count - len < 2) {
3344 count = -EINVAL;
3345 goto out_err;
3346 }
3347 len += sprintf(mask_str + len, "\n");
3348 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3349
3350out_err:
Ingo Molnarc7078de2008-05-12 21:20:52 +02003351 mutex_unlock(&tracing_cpumask_update_lock);
3352
3353 return count;
3354}
3355
3356static ssize_t
3357tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3358 size_t count, loff_t *ppos)
3359{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003360 struct trace_array *tr = file_inode(filp)->i_private;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303361 cpumask_var_t tracing_cpumask_new;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003362 int err, cpu;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303363
3364 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3365 return -ENOMEM;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003366
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303367 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003368 if (err)
3369 goto err_unlock;
3370
Li Zefan215368e2009-06-15 10:56:42 +08003371 mutex_lock(&tracing_cpumask_update_lock);
3372
Steven Rostedta5e25882008-12-02 15:34:05 -05003373 local_irq_disable();
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05003374 arch_spin_lock(&tr->max_lock);
Steven Rostedtab464282008-05-12 21:21:00 +02003375 for_each_tracing_cpu(cpu) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02003376 /*
3377 * Increase/decrease the disabled counter if we are
3378 * about to flip a bit in the cpumask:
3379 */
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003380 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303381 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003382 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3383 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003384 }
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003385 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303386 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003387 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3388 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003389 }
3390 }
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05003391 arch_spin_unlock(&tr->max_lock);
Steven Rostedta5e25882008-12-02 15:34:05 -05003392 local_irq_enable();
Ingo Molnar36dfe922008-05-12 21:20:52 +02003393
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003394 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003395
Ingo Molnarc7078de2008-05-12 21:20:52 +02003396 mutex_unlock(&tracing_cpumask_update_lock);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303397 free_cpumask_var(tracing_cpumask_new);
Ingo Molnarc7078de2008-05-12 21:20:52 +02003398
Ingo Molnarc7078de2008-05-12 21:20:52 +02003399 return count;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003400
3401err_unlock:
Li Zefan215368e2009-06-15 10:56:42 +08003402 free_cpumask_var(tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003403
3404 return err;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003405}
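/*
 * Illustrative usage of the read/write pair above (the mask value is
 * hypothetical): "echo 0f > tracing_cpumask" limits tracing to CPUs
 * 0-3, and reading the file back shows the mask that was committed.
 */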
3406
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003407static const struct file_operations tracing_cpumask_fops = {
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003408 .open = tracing_open_generic_tr,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003409 .read = tracing_cpumask_read,
3410 .write = tracing_cpumask_write,
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003411 .release = tracing_release_generic_tr,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003412 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003413};
3414
Li Zefanfdb372e2009-12-08 11:15:59 +08003415static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003416{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003417 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003418 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003419 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003420 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003421
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003422 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003423 tracer_flags = tr->current_trace->flags->val;
3424 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003425
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003426 for (i = 0; trace_options[i]; i++) {
3427 if (trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08003428 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003429 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003430 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003431 }
3432
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003433 for (i = 0; trace_opts[i].name; i++) {
3434 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08003435 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003436 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003437 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003438 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003439 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003440
Li Zefanfdb372e2009-12-08 11:15:59 +08003441 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003442}
3443
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003444static int __set_tracer_option(struct trace_array *tr,
Li Zefan8d18eaa2009-12-08 11:17:06 +08003445 struct tracer_flags *tracer_flags,
3446 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003447{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003448 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003449 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003450
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003451 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003452 if (ret)
3453 return ret;
3454
3455 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08003456 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003457 else
Zhaolei77708412009-08-07 18:53:21 +08003458 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003459 return 0;
3460}
3461
Li Zefan8d18eaa2009-12-08 11:17:06 +08003462/* Try to assign a tracer specific option */
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003463static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
Li Zefan8d18eaa2009-12-08 11:17:06 +08003464{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003465 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003466 struct tracer_flags *tracer_flags = trace->flags;
3467 struct tracer_opt *opts = NULL;
3468 int i;
3469
3470 for (i = 0; tracer_flags->opts[i].name; i++) {
3471 opts = &tracer_flags->opts[i];
3472
3473 if (strcmp(cmp, opts->name) == 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003474 return __set_tracer_option(tr, trace->flags, opts, neg);
Li Zefan8d18eaa2009-12-08 11:17:06 +08003475 }
3476
3477 return -EINVAL;
3478}
3479
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003480/* Some tracers require overwrite to stay enabled */
3481int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3482{
3483 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3484 return -1;
3485
3486 return 0;
3487}
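/*
 * A sketch of how a tracer can use the helper above (the callback name
 * is hypothetical; overwrite-dependent tracers wire it up this way):
 * returning -1 from flag_changed vetoes clearing the overwrite flag
 * while the tracer is enabled.
 */
#if 0
static int example_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	return trace_keep_overwrite(tr->current_trace, mask, set);
}
#endif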
3488
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003489int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003490{
3491 /* do nothing if flag is already set */
3492 if (!!(trace_flags & mask) == !!enabled)
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003493 return 0;
3494
3495 /* Give the tracer a chance to approve the change */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003496 if (tr->current_trace->flag_changed)
Steven Rostedt (Red Hat)bf6065b2014-01-10 17:51:01 -05003497 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003498 return -EINVAL;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003499
3500 if (enabled)
3501 trace_flags |= mask;
3502 else
3503 trace_flags &= ~mask;
Li Zefane870e9a2010-07-02 11:07:32 +08003504
3505 if (mask == TRACE_ITER_RECORD_CMD)
3506 trace_event_enable_cmd_record(enabled);
David Sharp750912f2010-12-08 13:46:47 -08003507
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003508 if (mask == TRACE_ITER_OVERWRITE) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003509 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003510#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003511 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003512#endif
3513 }
Steven Rostedt81698832012-10-11 10:15:05 -04003514
3515 if (mask == TRACE_ITER_PRINTK)
3516 trace_printk_start_stop_comm(enabled);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003517
3518 return 0;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003519}
3520
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003521static int trace_set_options(struct trace_array *tr, char *option)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003522{
Li Zefan8d18eaa2009-12-08 11:17:06 +08003523 char *cmp;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003524 int neg = 0;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003525 int ret = -ENODEV;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003526 int i;
3527
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003528 cmp = strstrip(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003529
Li Zefan8d18eaa2009-12-08 11:17:06 +08003530 if (strncmp(cmp, "no", 2) == 0) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003531 neg = 1;
3532 cmp += 2;
3533 }
3534
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003535 mutex_lock(&trace_types_lock);
3536
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003537 for (i = 0; trace_options[i]; i++) {
Li Zefan8d18eaa2009-12-08 11:17:06 +08003538 if (strcmp(cmp, trace_options[i]) == 0) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003539 ret = set_tracer_flag(tr, 1 << i, !neg);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003540 break;
3541 }
3542 }
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003543
3544 /* If no option could be set, test the specific tracer options */
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003545 if (!trace_options[i])
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003546 ret = set_tracer_option(tr, cmp, neg);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003547
3548 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003549
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003550 return ret;
3551}
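/*
 * Illustrative calls into the parser above: writing "stacktrace" to
 * trace_options ends up in trace_set_options(tr, "stacktrace") and
 * sets the flag; the "no" prefix, as in "nostacktrace", clears it.
 */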
3552
3553static ssize_t
3554tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3555 size_t cnt, loff_t *ppos)
3556{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003557 struct seq_file *m = filp->private_data;
3558 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003559 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003560 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003561
3562 if (cnt >= sizeof(buf))
3563 return -EINVAL;
3564
3565	if (copy_from_user(buf, ubuf, cnt))
3566 return -EFAULT;
3567
Steven Rostedta8dd2172013-01-09 20:54:17 -05003568 buf[cnt] = 0;
3569
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003570 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003571 if (ret < 0)
3572 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003573
Jiri Olsacf8517c2009-10-23 19:36:16 -04003574 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003575
3576 return cnt;
3577}
3578
Li Zefanfdb372e2009-12-08 11:15:59 +08003579static int tracing_trace_options_open(struct inode *inode, struct file *file)
3580{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003581 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003582 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003583
Li Zefanfdb372e2009-12-08 11:15:59 +08003584 if (tracing_disabled)
3585 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003586
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003587 if (trace_array_get(tr) < 0)
3588 return -ENODEV;
3589
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003590 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3591 if (ret < 0)
3592 trace_array_put(tr);
3593
3594 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08003595}
3596
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003597static const struct file_operations tracing_iter_fops = {
Li Zefanfdb372e2009-12-08 11:15:59 +08003598 .open = tracing_trace_options_open,
3599 .read = seq_read,
3600 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003601 .release = tracing_single_release_tr,
Steven Rostedtee6bce52008-11-12 17:52:37 -05003602 .write = tracing_trace_options_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003603};
3604
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003605static const char readme_msg[] =
3606 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003607 "# echo 0 > tracing_on : quick way to disable tracing\n"
3608 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3609 " Important files:\n"
3610 " trace\t\t\t- The static contents of the buffer\n"
3611 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3612 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3613 " current_tracer\t- function and latency tracers\n"
3614 " available_tracers\t- list of configured tracers for current_tracer\n"
3615 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3616 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3617	" trace_clock\t\t- change the clock used to order events\n"
3618 " local: Per cpu clock but may not be synced across CPUs\n"
3619 " global: Synced across CPUs but slows tracing down.\n"
3620 " counter: Not a clock, but just an increment\n"
3621 " uptime: Jiffy counter from time of boot\n"
3622 " perf: Same clock that perf events use\n"
3623#ifdef CONFIG_X86_64
3624 " x86-tsc: TSC cycle counter\n"
3625#endif
3626	"\n trace_marker\t\t- Writes into this file go into the kernel buffer\n"
3627 " tracing_cpumask\t- Limit which CPUs to trace\n"
3628 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3629 "\t\t\t Remove sub-buffer with rmdir\n"
3630 " trace_options\t\t- Set format or modify how tracing happens\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003631	"\t\t\t Disable an option by prefixing 'no' to the\n"
3632	"\t\t\t option name\n"
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003633	" saved_cmdlines_size\t- echo the number of entries to keep in the comm-pid list\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003634#ifdef CONFIG_DYNAMIC_FTRACE
3635 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003636 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3637 "\t\t\t functions\n"
3638 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3639 "\t modules: Can select a group via module\n"
3640 "\t Format: :mod:<module-name>\n"
3641 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3642 "\t triggers: a command to perform when function is hit\n"
3643 "\t Format: <function>:<trigger>[:count]\n"
3644 "\t trigger: traceon, traceoff\n"
3645 "\t\t enable_event:<system>:<event>\n"
3646 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003647#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003648 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003649#endif
3650#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003651 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003652#endif
Steven Rostedt (Red Hat)17a280e2014-04-10 22:43:37 -04003653 "\t\t dump\n"
3654 "\t\t cpudump\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003655 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3656 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3657 "\t The first one will disable tracing every time do_fault is hit\n"
3658 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3659 "\t The first time do trap is hit and it disables tracing, the\n"
3660 "\t counter will decrement to 2. If tracing is already disabled,\n"
3661 "\t the counter will not decrement. It only decrements when the\n"
3662 "\t trigger did work\n"
3663 "\t To remove trigger without count:\n"
3664 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3665 "\t To remove trigger with a count:\n"
3666 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003667 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003668 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3669 "\t modules: Can select a group via module command :mod:\n"
3670 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003671#endif /* CONFIG_DYNAMIC_FTRACE */
3672#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003673 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3674 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003675#endif
3676#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3677 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
Namhyung Kimd048a8c72014-06-13 01:23:53 +09003678 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003679 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3680#endif
3681#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003682 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3683 "\t\t\t snapshot buffer. Read the contents for more\n"
3684 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003685#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003686#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003687 " stack_trace\t\t- Shows the max stack trace when active\n"
3688 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003689 "\t\t\t Write into this file to reset the max size (trigger a\n"
3690 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003691#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003692 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3693 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003694#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003695#endif /* CONFIG_STACK_TRACER */
Tom Zanussi26f25562014-01-17 15:11:44 -06003696 " events/\t\t- Directory containing all trace event subsystems:\n"
3697 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3698 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003699 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3700 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003701 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003702 " events/<system>/<event>/\t- Directory containing control files for\n"
3703 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003704 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3705 " filter\t\t- If set, only events passing filter are traced\n"
3706 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003707 "\t Format: <trigger>[:count][if <filter>]\n"
3708 "\t trigger: traceon, traceoff\n"
3709 "\t enable_event:<system>:<event>\n"
3710 "\t disable_event:<system>:<event>\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003711#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003712 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003713#endif
3714#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003715 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003716#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003717 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3718 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3719 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3720 "\t events/block/block_unplug/trigger\n"
3721 "\t The first disables tracing every time block_unplug is hit.\n"
3722 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3723 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3724 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3725 "\t Like function triggers, the counter is only decremented if it\n"
3726 "\t enabled or disabled tracing.\n"
3727 "\t To remove a trigger without a count:\n"
3728 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3729 "\t To remove a trigger with a count:\n"
3730 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3731 "\t Filters can be ignored when removing a trigger.\n"
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003732;

static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
					readme_msg, strlen(readme_msg));
}

static const struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
	.llseek		= generic_file_llseek,
};

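/*
 * seq_file iterator for the "saved_cmdlines" file: walks the pid map
 * recorded in savedcmd, skipping unmapped slots, and emits one
 * "<pid> <comm>" pair per mapped entry.
 */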
static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned int *ptr = v;

	if (*pos || m->count)
		ptr++;

	(*pos)++;

	for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
	     ptr++) {
		if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
			continue;

		return ptr;
	}

	return NULL;
}

static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
{
	void *v;
	loff_t l = 0;

	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	v = &savedcmd->map_cmdline_to_pid[0];
	while (l <= *pos) {
		v = saved_cmdlines_next(m, v, &l);
		if (!v)
			return NULL;
	}

	return v;
}

static void saved_cmdlines_stop(struct seq_file *m, void *v)
{
	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

static int saved_cmdlines_show(struct seq_file *m, void *v)
{
	char buf[TASK_COMM_LEN];
	unsigned int *pid = v;

	__trace_find_cmdline(*pid, buf);
	seq_printf(m, "%d %s\n", *pid, buf);
	return 0;
}

static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
	.start		= saved_cmdlines_start,
	.next		= saved_cmdlines_next,
	.stop		= saved_cmdlines_stop,
	.show		= saved_cmdlines_show,
};

static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
}

static const struct file_operations tracing_saved_cmdlines_fops = {
	.open		= tracing_saved_cmdlines_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

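/*
 * "saved_cmdlines_size" reports how many comm-pid entries the saved
 * cmdlines list can hold and lets userspace resize that list.
 */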
static ssize_t
tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
				 size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	arch_spin_lock(&trace_cmdline_lock);
	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
	arch_spin_unlock(&trace_cmdline_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
{
	kfree(s->saved_cmdlines);
	kfree(s->map_cmdline_to_pid);
	kfree(s);
}

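/*
 * The replacement buffer is allocated before trace_cmdline_lock is
 * taken, so the swap under the lock stays short; the old buffer is
 * freed only after the lock has been dropped.
 */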
static int tracing_resize_saved_cmdlines(unsigned int val)
{
	struct saved_cmdlines_buffer *s, *savedcmd_temp;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	if (allocate_cmdlines_buffer(val, s) < 0) {
		kfree(s);
		return -ENOMEM;
	}

	arch_spin_lock(&trace_cmdline_lock);
	savedcmd_temp = savedcmd;
	savedcmd = s;
	arch_spin_unlock(&trace_cmdline_lock);
	free_saved_cmdlines_buffer(savedcmd_temp);

	return 0;
}

static ssize_t
tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
				  size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry and at most PID_MAX_DEFAULT */
	if (!val || val > PID_MAX_DEFAULT)
		return -EINVAL;

	ret = tracing_resize_saved_cmdlines((unsigned int)val);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations tracing_saved_cmdlines_size_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_saved_cmdlines_size_read,
	.write		= tracing_saved_cmdlines_size_write,
};

static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+2];
	int r;

	mutex_lock(&trace_types_lock);
	r = sprintf(buf, "%s\n", tr->current_trace->name);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

int tracer_init(struct tracer *t, struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
	return t->init(tr);
}

static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
{
	int cpu;

	for_each_tracing_cpu(cpu)
		per_cpu_ptr(buf->data, cpu)->entries = val;
}

#ifdef CONFIG_TRACER_MAX_TRACE
/* resize @trace_buf's buffer to the size of @size_buf's entries */
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id)
{
	int cpu, ret = 0;

	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
			if (ret < 0)
				break;
			per_cpu_ptr(trace_buf->data, cpu)->entries =
				per_cpu_ptr(size_buf->data, cpu)->entries;
		}
	} else {
		ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
		if (ret == 0)
			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
				per_cpu_ptr(size_buf->data, cpu_id)->entries;
	}

	return ret;
}
#endif /* CONFIG_TRACER_MAX_TRACE */

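/*
 * Resize the main ring buffer and, when the current tracer has a
 * snapshot allocated, keep the "max" buffer the same size as the main
 * buffer so the two can still be swapped.
 */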
static int __tracing_resize_ring_buffer(struct trace_array *tr,
					unsigned long size, int cpu)
{
	int ret;

	/*
	 * If kernel or user changes the size of the ring buffer
	 * we use the size that was given, and we can forget about
	 * expanding it later.
	 */
	ring_buffer_expanded = true;

	/* May be called before buffers are initialized */
	if (!tr->trace_buffer.buffer)
		return 0;

	ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
	if (ret < 0)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
	    !tr->current_trace->use_max_tr)
		goto out;

	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
	if (ret < 0) {
		int r = resize_buffer_duplicate_size(&tr->trace_buffer,
						     &tr->trace_buffer, cpu);
		if (r < 0) {
			/*
			 * AARGH! We are left with a different size
			 * max buffer!
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the
			 * latency tracers), it swaps the max buffer
			 * with the saved snapshot. We succeeded in
			 * updating the size of the main buffer, but failed to
			 * update the size of the max buffer. But when we tried
			 * to reset the main buffer to the original size, we
			 * failed there too. This is very unlikely to
			 * happen, but if it does, warn and kill all
			 * tracing.
			 */
			WARN_ON(1);
			tracing_disabled = 1;
		}
		return ret;
	}

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->max_buffer, size);
	else
		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;

 out:
#endif /* CONFIG_TRACER_MAX_TRACE */

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->trace_buffer, size);
	else
		per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;

	return ret;
}

static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
					  unsigned long size, int cpu_id)
{
	int ret = size;

	mutex_lock(&trace_types_lock);

	if (cpu_id != RING_BUFFER_ALL_CPUS) {
		/* make sure this cpu is enabled in the mask */
		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
			ret = -EINVAL;
			goto out;
		}
	}

	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
	if (ret < 0)
		ret = -ENOMEM;

out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

/**
 * tracing_update_buffers - used by tracing facility to expand ring buffers
 *
 * To save memory when tracing is never used on a system that has it
 * configured in, the ring buffers start at a minimum size. Once a user
 * starts to use the tracing facility, they need to grow to their
 * default size.
 *
 * This function is to be called when a tracer is about to be used.
 */
int tracing_update_buffers(void)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded)
		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
	mutex_unlock(&trace_types_lock);

	return ret;
}

struct trace_option_dentry;

static struct trace_option_dentry *
create_trace_option_files(struct trace_array *tr, struct tracer *tracer);

static void
destroy_trace_option_files(struct trace_option_dentry *topts);

/*
 * Used to clear out the tracer before deletion of an instance.
 * Must have trace_types_lock held.
 */
static void tracing_set_nop(struct trace_array *tr)
{
	if (tr->current_trace == &nop_trace)
		return;

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	tr->current_trace = &nop_trace;
}

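/*
 * Switch @tr to the tracer named @buf: shut down the current tracer,
 * allocate or free the snapshot buffer as the new tracer requires, then
 * initialize and enable the new tracer.
 */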
static int tracing_set_tracer(struct trace_array *tr, const char *buf)
{
	static struct trace_option_dentry *topts;
	struct tracer *t;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool had_max_tr;
#endif
	int ret = 0;

	mutex_lock(&trace_types_lock);

	if (!ring_buffer_expanded) {
		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			goto out;
		ret = 0;
	}

	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (t == tr->current_trace)
		goto out;

	/* Some tracers are only allowed for the top level buffer */
	if (!trace_ok_for_array(t, tr)) {
		ret = -EINVAL;
		goto out;
	}

	trace_branch_disable();

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	/* Current trace needs to be nop_trace before synchronize_sched */
	tr->current_trace = &nop_trace;

#ifdef CONFIG_TRACER_MAX_TRACE
	had_max_tr = tr->allocated_snapshot;

	if (had_max_tr && !t->use_max_tr) {
		/*
		 * We need to make sure that the update_max_tr sees that
		 * current_trace changed to nop_trace to keep it from
		 * swapping the buffers after we resize it.
		 * The update_max_tr is called with interrupts disabled,
		 * so a synchronize_sched() is sufficient.
		 */
		synchronize_sched();
		free_snapshot(tr);
	}
#endif
	/* Currently, only the top instance has options */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
		destroy_trace_option_files(topts);
		topts = create_trace_option_files(tr, t);
	}

#ifdef CONFIG_TRACER_MAX_TRACE
	if (t->use_max_tr && !had_max_tr) {
		ret = alloc_snapshot(tr);
		if (ret < 0)
			goto out;
	}
#endif

	if (t->init) {
		ret = tracer_init(t, tr);
		if (ret)
			goto out;
	}

	tr->current_trace = t;
	tr->current_trace->enabled++;
	trace_branch_enable(tr);
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+1];
	int i;
	size_t ret;
	int err;

	ret = cnt;

	if (cnt > MAX_TRACER_SIZE)
		cnt = MAX_TRACER_SIZE;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip trailing whitespace */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	err = tracing_set_tracer(tr, buf);
	if (err)
		return err;

	*ppos += ret;

	return ret;
}

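/*
 * "tracing_thresh" and "tracing_max_latency" store nanoseconds but are
 * read and written in microseconds, so these helpers convert on the
 * way in and out.
 */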
static ssize_t
tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n",
		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	*ptr = val * 1000;

	return cnt;
}

static ssize_t
tracing_thresh_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
}

static ssize_t
tracing_thresh_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	int ret;

	mutex_lock(&trace_types_lock);
	ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
	if (ret < 0)
		goto out;

	if (tr->current_trace->update_thresh) {
		ret = tr->current_trace->update_thresh(tr);
		if (ret < 0)
			goto out;
	}

	ret = cnt;
out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
}

static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
}

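/*
 * Open handler for the trace pipe: builds a private trace_iterator
 * whose reads consume events from the ring buffer.
 */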
static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	mutex_lock(&trace_types_lock);

	/* create a buffer to store the information to pass to userspace */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		ret = -ENOMEM;
		__trace_array_put(tr);
		goto out;
	}

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace) {
		ret = -ENOMEM;
		goto fail;
	}
	*iter->trace = *tr->current_trace;

	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto fail;
	}

	/* trace pipe does not show start of buffer */
	cpumask_setall(iter->started);

	if (trace_flags & TRACE_ITER_LATENCY_FMT)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	iter->tr = tr;
	iter->trace_buffer = &tr->trace_buffer;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);
	filp->private_data = iter;

	if (iter->trace->pipe_open)
		iter->trace->pipe_open(iter);

	nonseekable_open(inode, filp);
out:
	mutex_unlock(&trace_types_lock);
	return ret;

fail:
	kfree(iter->trace);
	kfree(iter);
	__trace_array_put(tr);
	mutex_unlock(&trace_types_lock);
	return ret;
}

static int tracing_release_pipe(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter = file->private_data;
	struct trace_array *tr = inode->i_private;

	mutex_lock(&trace_types_lock);

	if (iter->trace->pipe_close)
		iter->trace->pipe_close(iter);

	mutex_unlock(&trace_types_lock);

	free_cpumask_var(iter->started);
	mutex_destroy(&iter->mutex);
	kfree(iter->trace);
	kfree(iter);

	trace_array_put(tr);

	return 0;
}

static unsigned int
trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return POLLIN | POLLRDNORM;

	if (trace_flags & TRACE_ITER_BLOCK)
		/*
		 * Always select as readable when in blocking mode
		 */
		return POLLIN | POLLRDNORM;
	else
		return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
					     filp, poll_table);
}

static unsigned int
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
	struct trace_iterator *iter = filp->private_data;

	return trace_poll(iter, filp, poll_table);
}

/* Must be called with trace_types_lock mutex held. */
static int tracing_wait_pipe(struct file *filp)
{
	struct trace_iterator *iter = filp->private_data;
	int ret;

	while (trace_empty(iter)) {

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		/*
		 * We stop blocking only once we have read something and
		 * tracing is disabled. We still block if tracing is
		 * disabled but we have never read anything. This allows a
		 * user to cat this file, and then enable tracing. But
		 * after we have read something, we give an EOF when
		 * tracing is again disabled.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracing_is_on() && iter->pos)
			break;

		mutex_unlock(&iter->mutex);

		ret = wait_on_pipe(iter);

		mutex_lock(&iter->mutex);

		if (ret)
			return ret;

		if (signal_pending(current))
			return -EINTR;
	}

	return 1;
}

/*
 * Consumer reader.
 */
static ssize_t
tracing_read_pipe(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_iterator *iter = filp->private_data;
	struct trace_array *tr = iter->tr;
	ssize_t sret;

	/* return any leftover data */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (sret != -EBUSY)
		return sret;

	trace_seq_init(&iter->seq);

	/* copy the tracer to avoid using a global lock all around */
	mutex_lock(&trace_types_lock);
	if (unlikely(iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

	/*
	 * Avoid more than one consumer on a single file descriptor
	 * This is just a matter of traces coherency, the ring buffer itself
	 * is protected.
	 */
	mutex_lock(&iter->mutex);
	if (iter->trace->read) {
		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
		if (sret)
			goto out;
	}

waitagain:
	sret = tracing_wait_pipe(filp);
	if (sret <= 0)
		goto out;

	/* stop when tracing is finished */
	if (trace_empty(iter)) {
		sret = 0;
		goto out;
	}

	if (cnt >= PAGE_SIZE)
		cnt = PAGE_SIZE - 1;

	/* reset all but tr, trace, and overruns */
	memset(&iter->seq, 0,
	       sizeof(struct trace_iterator) -
	       offsetof(struct trace_iterator, seq));
	cpumask_clear(iter->started);
	iter->pos = -1;

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);
	while (trace_find_next_entry_inc(iter) != NULL) {
		enum print_line_t ret;
		int len = iter->seq.len;

		ret = print_trace_line(iter);
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			/* don't print partial lines */
			iter->seq.len = len;
			break;
		}
		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);

		if (iter->seq.len >= cnt)
			break;

		/*
		 * Setting the full flag means we reached the trace_seq buffer
		 * size and we should leave by partial output condition above.
		 * One of the trace_seq_* functions is not used properly.
		 */
		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
			  iter->ent->type);
	}
	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();

	/* Now copy what we have to the user */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (iter->seq.readpos >= iter->seq.len)
		trace_seq_init(&iter->seq);

	/*
	 * If there was nothing to send to user, in spite of consuming trace
	 * entries, go back to wait for more entries.
	 */
	if (sret == -EBUSY)
		goto waitagain;

out:
	mutex_unlock(&iter->mutex);

	return sret;
}

static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
				     unsigned int idx)
{
	__free_page(spd->pages[idx]);
}

static const struct pipe_buf_operations tracing_pipe_buf_ops = {
	.can_merge		= 0,
	.confirm		= generic_pipe_buf_confirm,
	.release		= generic_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= generic_pipe_buf_get,
};

static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
	size_t count;
	int ret;

	/* Seq buffer is page-sized, exactly what we need. */
	for (;;) {
		count = iter->seq.len;
		ret = print_trace_line(iter);
		count = iter->seq.len - count;
		if (rem < count) {
			rem = 0;
			iter->seq.len -= count;
			break;
		}
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			iter->seq.len -= count;
			break;
		}

		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);
		rem -= count;
		if (!trace_find_next_entry_inc(iter)) {
			rem = 0;
			iter->ent = NULL;
			break;
		}
	}

	return rem;
}

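/*
 * Splice handler for the trace pipe: renders trace entries into freshly
 * allocated pages and hands those pages to the pipe, avoiding an extra
 * copy through a userspace buffer.
 */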
static ssize_t tracing_splice_read_pipe(struct file *filp,
					loff_t *ppos,
					struct pipe_inode_info *pipe,
					size_t len,
					unsigned int flags)
{
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct trace_iterator *iter = filp->private_data;
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages	= 0, /* This gets updated below. */
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &tracing_pipe_buf_ops,
		.spd_release	= tracing_spd_release_pipe,
	};
	struct trace_array *tr = iter->tr;
	ssize_t ret;
	size_t rem;
	unsigned int i;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	/* copy the tracer to avoid using a global lock all around */
	mutex_lock(&trace_types_lock);
	if (unlikely(iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

	mutex_lock(&iter->mutex);

	if (iter->trace->splice_read) {
		ret = iter->trace->splice_read(iter, filp,
					       ppos, pipe, len, flags);
		if (ret)
			goto out_err;
	}

	ret = tracing_wait_pipe(filp);
	if (ret <= 0)
		goto out_err;

	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
		ret = -EFAULT;
		goto out_err;
	}

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);

	/* Fill as many pages as possible. */
	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
		spd.pages[i] = alloc_page(GFP_KERNEL);
		if (!spd.pages[i])
			break;

		rem = tracing_fill_pipe_page(rem, iter);

		/* Copy the data into the page, so we can start over. */
		ret = trace_seq_to_buffer(&iter->seq,
					  page_address(spd.pages[i]),
					  iter->seq.len);
		if (ret < 0) {
			__free_page(spd.pages[i]);
			break;
		}
		spd.partial[i].offset = 0;
		spd.partial[i].len = iter->seq.len;

		trace_seq_init(&iter->seq);
	}

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
	mutex_unlock(&iter->mutex);

	spd.nr_pages = i;

	ret = splice_to_pipe(pipe, &spd);
out:
	splice_shrink_spd(&spd);
	return ret;

out_err:
	mutex_unlock(&iter->mutex);
	goto out;
}

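/*
 * "buffer_size_kb": report the ring buffer size of one cpu (or of all
 * cpus, when they are all the same size) in kilobytes.
 */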
static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	int cpu = tracing_get_cpu(inode);
	char buf[64];
	int r = 0;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

	if (cpu == RING_BUFFER_ALL_CPUS) {
		int cpu, buf_size_same;
		unsigned long size;

		size = 0;
		buf_size_same = 1;
		/* check if all cpu sizes are same */
		for_each_tracing_cpu(cpu) {
			/* fill in the size from first enabled cpu */
			if (size == 0)
				size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
			if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
				buf_size_same = 0;
				break;
			}
		}

		if (buf_size_same) {
			if (!ring_buffer_expanded)
				r = sprintf(buf, "%lu (expanded: %lu)\n",
					    size >> 10,
					    trace_buf_size >> 10);
			else
				r = sprintf(buf, "%lu\n", size >> 10);
		} else
			r = sprintf(buf, "X\n");
	} else
		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);

	mutex_unlock(&trace_types_lock);

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
	return ret;
}

static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry */
	if (!val)
		return -EINVAL;

	/* value is in KB */
	val <<= 10;
	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

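/*
 * "buffer_total_size_kb": report the combined size of all cpu ring
 * buffers in kilobytes.
 */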
static ssize_t
tracing_total_entries_read(struct file *filp, char __user *ubuf,
			   size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r, cpu;
	unsigned long size = 0, expanded_size = 0;

	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
		size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
		if (!ring_buffer_expanded)
			expanded_size += trace_buf_size >> 10;
	}
	if (ring_buffer_expanded)
		r = sprintf(buf, "%lu\n", size);
	else
		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

4793static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004794tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
4795 size_t cnt, loff_t *ppos)
4796{
4797 /*
4798 * There is no need to read what the user has written, this function
4799 * is just to make sure that there is no error when "echo" is used
4800 */
4801
4802 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004803
4804 return cnt;
4805}
4806
static int
tracing_free_buffer_release(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	/* disable tracing ? */
	if (trace_flags & TRACE_ITER_STOP_ON_FREE)
		tracer_tracing_off(tr);
	/* resize the ring buffer to 0 */
	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);

	trace_array_put(tr);

	return 0;
}

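/*
 * Usage sketch (illustrative): "free_buffer" is write-only; the write
 * handler above ignores the data, and the release handler shrinks the
 * ring buffer to zero on close, optionally stopping tracing first when
 * the TRACE_ITER_STOP_ON_FREE flag is set:
 *
 *	echo > /sys/kernel/debug/tracing/free_buffer
 */
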
static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
					size_t cnt, loff_t *fpos)
{
	unsigned long addr = (unsigned long)ubuf;
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	struct page *pages[2];
	void *map_page[2];
	int nr_pages = 1;
	ssize_t written;
	int offset;
	int size;
	int len;
	int ret;
	int i;

	if (tracing_disabled)
		return -EINVAL;

	if (!(trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	/*
	 * Userspace is injecting traces into the kernel trace buffer.
	 * We want to be as non-intrusive as possible.
	 * To do so, we do not want to allocate any special buffers
	 * or take any locks, but instead write the userspace data
	 * straight into the ring buffer.
	 *
	 * First we need to pin the userspace buffer into memory,
	 * which most likely it already is, because the caller just
	 * referenced it. But there's no guarantee that it is. By using
	 * get_user_pages_fast() and kmap_atomic/kunmap_atomic() we can
	 * get access to the pages directly. We then write the data
	 * directly into the ring buffer.
	 */
	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	/* check if we cross pages */
	if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
		nr_pages = 2;

	offset = addr & (PAGE_SIZE - 1);
	addr &= PAGE_MASK;

	ret = get_user_pages_fast(addr, nr_pages, 0, pages);
	if (ret < nr_pages) {
		while (--ret >= 0)
			put_page(pages[ret]);
		written = -EFAULT;
		goto out;
	}

	for (i = 0; i < nr_pages; i++)
		map_page[i] = kmap_atomic(pages[i]);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt + 2; /* possible \n added */
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  irq_flags, preempt_count());
	if (!event) {
		/* Ring buffer disabled, return as if not open for write */
		written = -EBADF;
		goto out_unlock;
	}

	entry = ring_buffer_event_data(event);
	entry->ip = _THIS_IP_;

	if (nr_pages == 2) {
		len = PAGE_SIZE - offset;
		memcpy(&entry->buf, map_page[0] + offset, len);
		memcpy(&entry->buf[len], map_page[1], cnt - len);
	} else
		memcpy(&entry->buf, map_page[0] + offset, cnt);

	if (entry->buf[cnt - 1] != '\n') {
		entry->buf[cnt] = '\n';
		entry->buf[cnt + 1] = '\0';
	} else
		entry->buf[cnt] = '\0';

	__buffer_unlock_commit(buffer, event);

	written = cnt;

	*fpos += written;

 out_unlock:
	for (i = 0; i < nr_pages; i++) {
		kunmap_atomic(map_page[i]);
		put_page(pages[i]);
	}
 out:
	return written;
}

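/*
 * Usage sketch (illustrative): tracing_mark_write() backs the
 * "trace_marker" file, so annotating a trace from user space is a plain
 * write(2); the handler pins the user pages and copies the bytes straight
 * into the ring buffer as a TRACE_PRINT entry:
 *
 *	int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);
 *	write(fd, "hello from user space\n", 22);
 *	close(fd);
 */
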
static int tracing_clock_show(struct seq_file *m, void *v)
{
	struct trace_array *tr = m->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
		seq_printf(m,
			"%s%s%s%s", i ? " " : "",
			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
			i == tr->clock_id ? "]" : "");
	seq_putc(m, '\n');

	return 0;
}

static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
		if (strcmp(trace_clocks[i].name, clockstr) == 0)
			break;
	}
	if (i == ARRAY_SIZE(trace_clocks))
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	tr->clock_id = i;

	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);

	/*
	 * New clock may not be consistent with the previous clock.
	 * Reset the buffer so that it doesn't have incomparable timestamps.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
	tracing_reset_online_cpus(&tr->max_buffer);
#endif

	mutex_unlock(&trace_types_lock);

	return 0;
}

static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *fpos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	const char *clockstr;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	clockstr = strstrip(buf);

	ret = tracing_set_clock(tr, clockstr);
	if (ret)
		return ret;

	*fpos += cnt;

	return cnt;
}

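/*
 * Usage sketch (illustrative): "trace_clock" is a seq_file; reading it
 * shows the available clocks with the active one in brackets, and writing
 * a clock name switches clocks (resetting the buffer, as noted above):
 *
 *	$ cat trace_clock
 *	[local] global counter ...
 *	$ echo global > trace_clock
 */
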
static int tracing_clock_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr))
		return -ENODEV;

	ret = single_open(file, tracing_clock_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

struct ftrace_buffer_info {
	struct trace_iterator	iter;
	void			*spare;
	unsigned int		read;
};

#ifdef CONFIG_TRACER_SNAPSHOT
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		ret = -ENOMEM;
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
			goto out;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
			goto out;
		}
		ret = 0;

		iter->tr = tr;
		iter->trace_buffer = &tr->max_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		m->private = iter;
		file->private_data = m;
	}
out:
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	switch (val) {
	case 0:
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (!tr->allocated_snapshot) {
			ret = alloc_snapshot(tr);
			if (ret < 0)
				break;
		}
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id());
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}

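/*
 * Usage sketch (illustrative): the value written to the "snapshot" file
 * selects a branch of the switch above; 0 frees the snapshot buffer, 1
 * allocates it if needed and swaps it with the live buffer, and any other
 * value clears the snapshot contents:
 *
 *	echo 1 > /sys/kernel/debug/tracing/snapshot	(take a snapshot)
 *	cat /sys/kernel/debug/tracing/snapshot		(read it back)
 *	echo 0 > /sys/kernel/debug/tracing/snapshot	(free the buffer)
 */
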
static int tracing_snapshot_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	int ret;

	ret = tracing_release(inode, file);

	if (file->f_mode & FMODE_READ)
		return ret;

	/* If write only, the seq_file is just a stub */
	if (m)
		kfree(m->private);
	kfree(m);

	return 0;
}

static int tracing_buffers_open(struct inode *inode, struct file *filp);
static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
				    size_t count, loff_t *ppos);
static int tracing_buffers_release(struct inode *inode, struct file *file);
static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);

static int snapshot_raw_open(struct inode *inode, struct file *filp)
{
	struct ftrace_buffer_info *info;
	int ret;

	ret = tracing_buffers_open(inode, filp);
	if (ret < 0)
		return ret;

	info = filp->private_data;

	if (info->iter.trace->use_max_tr) {
		tracing_buffers_release(inode, filp);
		return -EBUSY;
	}

	info->iter.snapshot = true;
	info->iter.trace_buffer = &info->iter.tr->max_buffer;

	return ret;
}

#endif /* CONFIG_TRACER_SNAPSHOT */


static const struct file_operations tracing_thresh_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_thresh_read,
	.write		= tracing_thresh_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};

static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_free_buffer_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};

static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_clock_write,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};

static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */

static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct ftrace_buffer_info *info;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	mutex_lock(&trace_types_lock);

	info->iter.tr		= tr;
	info->iter.cpu_file	= tracing_get_cpu(inode);
	info->iter.trace	= tr->current_trace;
	info->iter.trace_buffer = &tr->trace_buffer;
	info->spare		= NULL;
	/* Force reading ring buffer for first read */
	info->read		= (unsigned int)-1;

	filp->private_data = info;

	mutex_unlock(&trace_types_lock);

	ret = nonseekable_open(inode, filp);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static unsigned int
tracing_buffers_poll(struct file *filp, poll_table *poll_table)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;

	return trace_poll(iter, filp, poll_table);
}

static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;
	ssize_t ret;
	ssize_t size;

	if (!count)
		return 0;

	mutex_lock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
		size = -EBUSY;
		goto out_unlock;
	}
#endif

	if (!info->spare)
		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
							  iter->cpu_file);
	size = -ENOMEM;
	if (!info->spare)
		goto out_unlock;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

 again:
	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
				    &info->spare,
				    count,
				    iter->cpu_file, 0);
	trace_access_unlock(iter->cpu_file);

	if (ret < 0) {
		if (trace_empty(iter)) {
			if ((filp->f_flags & O_NONBLOCK)) {
				size = -EAGAIN;
				goto out_unlock;
			}
			mutex_unlock(&trace_types_lock);
			ret = wait_on_pipe(iter);
			mutex_lock(&trace_types_lock);
			if (ret) {
				size = ret;
				goto out_unlock;
			}
			if (signal_pending(current)) {
				size = -EINTR;
				goto out_unlock;
			}
			goto again;
		}
		size = 0;
		goto out_unlock;
	}

	info->read = 0;
 read:
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size) {
		size = -EFAULT;
		goto out_unlock;
	}
	size -= ret;

	*ppos += size;
	info->read += size;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return size;
}

static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	__trace_array_put(iter->tr);

	if (info->spare)
		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
	kfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}

struct buffer_ref {
	struct ring_buffer	*buffer;
	void			*page;
	int			ref;
};

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	buf->private = 0;
}

static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ref->ref++;
}

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge		= 0,
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,
};

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we errored out while filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	spd->partial[i].private = 0;
}

static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, size, i;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}
#endif

	if (splice_grow_spd(pipe, &spd)) {
		ret = -ENOMEM;
		goto out;
	}

	if (*ppos & (PAGE_SIZE - 1)) {
		ret = -EINVAL;
		goto out;
	}

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE) {
			ret = -EINVAL;
			goto out;
		}
		len &= PAGE_MASK;
	}

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref)
			break;

		ref->ref = 1;
		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (!ref->page) {
			kfree(ref);
			break;
		}

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->page);
			kfree(ref);
			break;
		}

		/*
		 * zero out any left over data, this is going to
		 * user land.
		 */
		size = ring_buffer_page_len(ref->page);
		if (size < PAGE_SIZE)
			memset(ref->page + size, 0, PAGE_SIZE - size);

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
			ret = -EAGAIN;
			goto out;
		}
		mutex_unlock(&trace_types_lock);
		ret = wait_on_pipe(iter);
		mutex_lock(&trace_types_lock);
		if (ret)
			goto out;
		if (signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
	splice_shrink_spd(&spd);
out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

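/*
 * Usage sketch (illustrative): "trace_pipe_raw" hands out whole
 * ring-buffer pages in their binary format, so consumers read (or splice)
 * page-sized chunks rather than parsing text:
 *
 *	int fd = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw",
 *		      O_RDONLY);
 *	char page[4096];
 *	ssize_t n = read(fd, page, sizeof(page));	(one raw page)
 */
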
static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
								t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				ring_buffer_time_stamp(trace_buf->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);

	kfree(s);

	return count;
}

static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

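/*
 * Output sketch (illustrative, values made up): each per_cpu/cpuN/stats
 * file rendered by tracing_stats_read() is a list of "name: value" lines:
 *
 *	entries: 129
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 5162
 *	oldest event ts:  2296.532301
 *	now ts:  2296.537997
 *	dropped events: 0
 *	read events: 129
 */
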
#ifdef CONFIG_DYNAMIC_FTRACE

int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
	int r;

	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
	buf[r++] = '\n';

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);

	return r;
}

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
#endif /* CONFIG_DYNAMIC_FTRACE */

#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	tracing_snapshot();
}

static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	unsigned long *count = (long *)data;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_snapshot();
}

static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	seq_printf(m, "snapshot");

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,
};

static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,
};

static int
ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	if (ret >= 0)
		alloc_snapshot(&global_trace);

	return ret < 0 ? ret : 0;
}

static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name			= "snapshot",
	.func			= ftrace_trace_snapshot_callback,
};

static __init int register_snapshot_cmd(void)
{
	return register_ftrace_command(&ftrace_snapshot_cmd);
}
#else
static inline __init int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */

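/*
 * Usage sketch (illustrative): the "snapshot" command registered above is
 * consumed through set_ftrace_filter; do_page_fault here is just a
 * stand-in for any traceable function:
 *
 *	echo 'do_page_fault:snapshot:5' > set_ftrace_filter
 *	echo '!do_page_fault:snapshot:0' > set_ftrace_filter
 *
 * The first line snapshots on the first five hits of the function, the
 * second removes the probe again (the '!' prefix takes the unregister
 * path in the callback above).
 */
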
struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
{
	if (tr->dir)
		return tr->dir;

	if (!debugfs_initialized())
		return NULL;

	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		tr->dir = debugfs_create_dir("tracing", NULL);

	if (!tr->dir)
		pr_warn_once("Could not create debugfs directory 'tracing'\n");

	return tr->dir;
}

struct dentry *tracing_init_dentry(void)
{
	return tracing_init_dentry_tr(&global_trace);
}

static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
	struct dentry *d_tracer;

	if (tr->percpu_dir)
		return tr->percpu_dir;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);

	WARN_ONCE(!tr->percpu_dir,
		  "Could not create debugfs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;
}

static struct dentry *
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
		      void *data, long cpu, const struct file_operations *fops)
{
	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);

	if (ret) /* See tracing_get_cpu() */
		ret->d_inode->i_cdev = (void *)(cpu + 1);
	return ret;
}

static void
tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
				tr, cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_cpu_file("trace", 0644, d_cpu,
				tr, cpu, &tracing_fops);

	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
				tr, cpu, &tracing_buffers_fops);

	trace_create_cpu_file("stats", 0444, d_cpu,
				tr, cpu, &tracing_stats_fops);

	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
				tr, cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_cpu_file("snapshot", 0644, d_cpu,
				tr, cpu, &snapshot_fops);

	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
				tr, cpu, &snapshot_raw_fops);
#endif
}

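/*
 * The function above gives each CPU its own debugfs subtree, so the
 * resulting layout (illustrative) looks like:
 *
 *	tracing/per_cpu/cpu0/trace
 *	tracing/per_cpu/cpu0/trace_pipe
 *	tracing/per_cpu/cpu0/trace_pipe_raw
 *	tracing/per_cpu/cpu0/stats
 *	tracing/per_cpu/cpu0/buffer_size_kb
 *	tracing/per_cpu/cpu1/...
 */
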
#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};

static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}


static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
	.llseek	= generic_file_llseek,
};

static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	long index = (long)filp->private_data;
	char *buf;

	if (trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	struct trace_array *tr = &global_trace;
	long index = (long)filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek = generic_file_llseek,
};

struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = debugfs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warning("Could not create debugfs '%s' entry\n", name);

	return ret;
}


static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->options = debugfs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warning("Could not create debugfs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}

static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);
}

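/*
 * Create the options files for every flag a tracer defines. The returned
 * array is kcalloc()ed with one extra zeroed entry, which acts as the
 * terminator that destroy_trace_option_files() scans for.
 */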
static struct trace_option_dentry *
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;

	if (!tracer)
		return NULL;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return NULL;

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return NULL;

	for (cnt = 0; opts[cnt].name; cnt++)
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);

	return topts;
}

static void
destroy_trace_option_files(struct trace_option_dentry *topts)
{
	int cnt;

	if (!topts)
		return;

	for (cnt = 0; topts[cnt].opt; cnt++)
		debugfs_remove(topts[cnt].entry);

	kfree(topts);
}

static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options, (void *)index,
				 &trace_options_core_fops);
}

static __init void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++)
		create_trace_option_core_file(tr, trace_options[i], i);
}

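/*
 * Handlers for the "tracing_on" file. Reading reports whether the ring
 * buffer is recording; writing 0 or 1 (e.g. "echo 0 > tracing_on")
 * stops or restarts recording without tearing the buffer down.
 */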
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open = tracing_open_generic_tr,
	.read = rb_simple_read,
	.write = rb_simple_write,
	.release = tracing_release_generic_tr,
	.llseek = default_llseek,
};

struct dentry *trace_instance_dir;

static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);

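/*
 * Allocate one trace_buffer: the ring buffer itself plus the per-cpu
 * trace_array_cpu bookkeeping. Returns 0 or -ENOMEM, freeing the ring
 * buffer again if the per-cpu allocation fails.
 */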
static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}

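/*
 * Allocate the buffers for a trace array: the main trace buffer and,
 * when CONFIG_TRACER_MAX_TRACE is set, the max/snapshot buffer. The
 * snapshot buffer is kept at minimum size unless a snapshot was
 * requested on the kernel command line.
 */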
static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		ring_buffer_free(tr->trace_buffer.buffer);
		free_percpu(tr->trace_buffer.data);
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}

static void free_trace_buffer(struct trace_buffer *buf)
{
	if (buf->buffer) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		free_percpu(buf->data);
		buf->data = NULL;
	}
}

static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}

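/*
 * Create a new named trace instance: allocate and wire up a trace_array,
 * give it its own buffers, debugfs directory and event files, and add it
 * to ftrace_trace_arrays. This backs mkdir in the "instances" directory,
 * e.g. "mkdir /sys/kernel/debug/tracing/instances/foo".
 */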
static int new_instance_create(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = debugfs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		debugfs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	init_tracer_debugfs(tr, tr->dir);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static int instance_delete(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	ret = -EBUSY;
	if (tr->ref)
		goto out_unlock;

	list_del(&tr->list);

	tracing_set_nop(tr);
	event_trace_del_tracer(tr);
	ftrace_destroy_function_files(tr);
	debugfs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

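/*
 * mkdir/rmdir in the "instances" directory are routed to these handlers
 * via the hijacked inode operations below, so creating or removing a
 * directory there creates or tears down a whole trace instance.
 */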
static int instance_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/*
	 * The inode mutex is locked, but debugfs_create_dir() will also
	 * take the mutex. As the instances directory can not be destroyed
	 * or changed in any other way, it is safe to unlock it, and
	 * let the dentry try. If two users try to make the same dir at
	 * the same time, then the new_instance_create() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = new_instance_create(dentry->d_iname);

	mutex_lock(&inode->i_mutex);

	return ret;
}

static int instance_rmdir(struct inode *inode, struct dentry *dentry)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/* The caller did a dget() on dentry */
	mutex_unlock(&dentry->d_inode->i_mutex);

	/*
	 * The inode mutex is locked, but debugfs_remove_recursive() will
	 * also take the mutex. As the instances directory can not be
	 * destroyed or changed in any other way, it is safe to unlock it,
	 * and let the dentry try. If two users try to remove the same dir
	 * at the same time, then instance_delete() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = instance_delete(dentry->d_iname);

	mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
	mutex_lock(&dentry->d_inode->i_mutex);

	return ret;
}

static const struct inode_operations instance_dir_inode_operations = {
	.lookup = simple_lookup,
	.mkdir = instance_mkdir,
	.rmdir = instance_rmdir,
};

static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = debugfs_create_dir("instances", d_tracer);
	if (WARN_ON(!trace_instance_dir))
		return;

	/* Hijack the dir inode operations, to allow mkdir */
	trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
}

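/*
 * Populate a trace array's debugfs directory with the standard control
 * files (current_tracer, trace, trace_pipe, buffer sizes, tracing_on,
 * etc.) plus the per-cpu directories. Used for both the top level
 * directory and each instance.
 */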
static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			  &tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_debugfs_percpu(tr, cpu);
}

static __init int tracer_init_debugfs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	init_tracer_debugfs(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			  &global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, d_tracer,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			  NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			  &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	create_trace_options_dir(&global_trace);

	return 0;
}

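/*
 * Panic and die notifiers: when ftrace_dump_on_oops is set, dump the
 * ring buffer contents to the console as the machine goes down, so the
 * trace leading up to the crash is not lost.
 */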
static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call = trace_panic_handler,
	.next = NULL,
	.priority = 150 /* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};

/*
 * printk is limited to 1024 bytes; we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define KERN_TRACE here so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->len >= TRACE_MAX_PRINT)
		s->len = TRACE_MAX_PRINT;

	/* Should be zero terminated, but we are paranoid. */
	s->buffer[s->len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

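/*
 * Set up an iterator over the global trace buffer, as used by
 * ftrace_dump() (and other in-kernel dumpers), mirroring the setup a
 * normal open of the "trace" file would do.
 */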
void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}

void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
	}

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("# MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUs to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all that we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);

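/*
 * Boot-time allocation of the tracing infrastructure: cpumasks, the
 * trace_printk and temp event buffers, the global trace buffers, and
 * the panic/die notifiers. Runs from early_initcall().
 */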
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* Used for event triggers */
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_free_cpumask;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUs */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warning("Trace clock %s not defined, going back to default\n",
				   trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	while (trace_boot_options) {
		char *option;

		option = strsep(&trace_boot_options, ",");
		trace_set_options(&global_trace, option);
	}

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The default bootup tracer name lives in an init section that is
	 * about to be freed. This function is called at late_initcall time;
	 * if the boot tracer was never found and registered by then, clear
	 * the pointer so that later registrations do not access the freed
	 * memory.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);